// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/debugfs.h>

#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
#include "cam_trace.h"
#include "cam_common_util.h"

static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

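/* Translate CAM_MEM_FLAG_* access flags into a DMA data direction. */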
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}

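/*
 * Map the whole dma-buf into the kernel address space page by page and
 * return the virtual address of the first page along with the buffer length.
 */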
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uintptr_t *vaddr,
	size_t *len)
{
	int i, j, rc;
	void *addr;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pairs to avoid stability issues.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	/*
	 * Code could be simplified if ION support of dma_buf_vmap is
	 * available. This workaround takes advantage of the fact that
	 * ion_alloc returns a virtually contiguous memory region, so we
	 * just need to _kmap each individual page and then only use the
	 * virtual address returned from the first call to _kmap.
	 */
	for (i = 0; i < PAGE_ALIGN(dmabuf->size) / PAGE_SIZE; i++) {
		addr = dma_buf_kmap(dmabuf, i);
		if (IS_ERR_OR_NULL(addr)) {
			CAM_ERR(CAM_MEM, "kernel map fail");
			for (j = 0; j < i; j++)
				dma_buf_kunmap(dmabuf,
					j,
					(void *)(*vaddr + (j * PAGE_SIZE)));
			*vaddr = 0;
			*len = 0;
			rc = -ENOSPC;
			goto fail;
		}
		if (i == 0)
			*vaddr = (uint64_t)addr;
	}

	*len = dmabuf->size;

	return 0;

fail:
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}

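/* Undo cam_mem_util_map_cpu_va(): kunmap every page and end CPU access. */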
static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int i, rc = 0, page_num;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;

	for (i = 0; i < page_num; i++) {
		dma_buf_kunmap(dmabuf, i,
			(void *)(vaddr + (i * PAGE_SIZE)));
	}

	/*
	 * dma_buf_begin_cpu_access() and
	 * dma_buf_end_cpu_access() need to be called in pairs
	 * to avoid stability issues.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}

	return rc;
}

static int cam_mem_mgr_create_debug_fs(void)
{
	tbl.dentry = debugfs_create_dir("camera_memmgr", NULL);
	if (!tbl.dentry) {
		CAM_ERR(CAM_MEM, "failed to create dentry");
		return -ENOMEM;
	}

	if (!debugfs_create_bool("alloc_profile_enable",
		0644,
		tbl.dentry,
		&tbl.alloc_profile_enable)) {
		CAM_ERR(CAM_MEM,
			"failed to create alloc_profile_enable");
		goto err;
	}

	return 0;

err:
	debugfs_remove_recursive(tbl.dentry);
	return -ENOMEM;
}

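/*
 * One-time setup of the buffer table: clear all slots, allocate the
 * allocation bitmap, reserve slot 0 (index 0 is an invalid handle) and
 * mark the manager initialized.
 */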
int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap)
		return -ENOMEM;

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}

	mutex_init(&tbl.m_lock);

	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);

	cam_mem_mgr_create_debug_fs();

	return 0;
}

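/* Claim the first free slot in the buffer table under the table lock. */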
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].is_internal = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}

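/*
 * Look up the IOVA that a buffer handle is mapped to on the given SMMU
 * handle; secure handles are resolved through the stage-2 mapping.
 */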
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active)
		return -EAGAIN;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
			buf_handle, mmu_handle, tbl.bufq[idx].fd);
		goto handle_mismatch;
	}

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d iova_ptr:%pK len_ptr:%llu",
		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);

handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);

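/*
 * Return the kernel virtual address and length of a buffer that was
 * allocated with CAM_MEM_FLAG_KMD_ACCESS.
 */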
int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EPERM;

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);

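/*
 * Perform a clean, invalidate or clean+invalidate cache operation on a
 * cached buffer by bracketing it with dma_buf begin/end CPU access calls.
 */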
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		rc = -EINVAL;
		goto end;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}

	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);

static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct dma_buf **buf)
{
	int rc = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	*buf = ion_alloc(len, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	return rc;
}

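/*
 * Allocate an ION buffer and also install a file descriptor for it. The
 * fd keeps the allocation reference and dma_buf_get() takes a second one,
 * so the buffer lives until both the fd is closed and dma_buf_put() runs.
 */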
static int cam_mem_util_get_dma_buf_fd(size_t len,
	size_t align,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct dma_buf **buf,
	int *fd)
{
	struct dma_buf *dmabuf = NULL;
	int rc = 0;
	struct timespec64 ts1, ts2;
	long microsec = 0;

	if (!buf || !fd) {
		CAM_ERR(CAM_MEM, "Invalid params, buf=%pK, fd=%pK", buf, fd);
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	*buf = ion_alloc(len, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	*fd = dma_buf_fd(*buf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto get_fd_fail;
	}

	/*
	 * increment the ref count so that ref count becomes 2 here
	 * when we close fd, refcount becomes 1 and when we do
	 * dma_buf_put, ref count becomes 0 and memory will be freed.
	 */
	dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d", *fd);
		rc = -EINVAL;
	}

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;

get_fd_fail:
	dma_buf_put(*buf);
	return rc;
}

static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
	struct dma_buf **dmabuf,
	int *fd)
{
	uint32_t heap_id;
	uint32_t ion_flag = 0;
	int rc;

	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cmd->flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cmd->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
		cmd->align,
		heap_id,
		ion_flag,
		dmabuf,
		fd);

	return rc;
}

static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}

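/*
 * Map an fd into every SMMU context in mmu_hdls, using stage-2 mappings
 * for protected buffers. On failure, mappings created so far are undone.
 */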
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
	bool is_internal)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);
	bool dis_delayed_unmap = false;

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP)
		dis_delayed_unmap = true;

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
				hw_vaddr,
				len);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], rc);
				goto multi_map_fail;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_user_iova(mmu_hdls[i],
				fd,
				dis_delayed_unmap,
				dir,
				(dma_addr_t *)hw_vaddr,
				len,
				region,
				is_internal);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], region, rc);
				goto multi_map_fail;
			}
		}
	}

	return rc;

multi_map_fail:
	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		for (--i; i >= 0; i--)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
	else
		for (--i; i >= 0; i--)
			cam_smmu_unmap_user_iova(mmu_hdls[i],
				fd,
				CAM_SMMU_REGION_IO);
	return rc;
}

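/*
 * Allocate an ION buffer, map it into the requested SMMU handles and, if
 * CAM_MEM_FLAG_KMD_ACCESS is set, into the kernel as well, then publish it
 * through a new buffer-table slot.
 */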
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	len = cmd->len;

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_ion_alloc(cmd,
		&dmabuf,
		&fd);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
			cmd->len, cmd->align, cmd->flags, cmd->num_hdl);
		return rc;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_SECHEAP;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			&hw_vaddr,
			&len,
			region,
			true);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, len=%llu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				cmd->len, cmd->flags, fd, region,
				cmd->num_hdl, rc);
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	tbl.bufq[idx].is_internal = true;
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = cmd->len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}

static bool cam_mem_util_is_map_internal(int32_t fd)
{
	uint32_t i;
	bool is_internal = false;

	mutex_lock(&tbl.m_lock);
	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
		if (tbl.bufq[i].fd == fd) {
			is_internal = tbl.bufq[i].is_internal;
			break;
		}
	}
	mutex_unlock(&tbl.m_lock);

	return is_internal;
}

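/*
 * Import an externally allocated dma-buf fd, map it into the requested
 * SMMU handles and track it in the buffer table as an imported buffer.
 */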
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;
	bool is_internal = false;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	is_internal = cam_mem_util_is_map_internal(cmd->fd);

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
			is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, CAM_SMMU_REGION_IO,
				cmd->num_hdl, rc);
			goto map_fail;
		}
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto map_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	tbl.bufq[idx].is_internal = is_internal;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_fail:
	dma_buf_put(dmabuf);
	return rc;
}

static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
					i, fd, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}

static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
}

static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_MEM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		tbl.bufq[i].is_internal = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}

void cam_mem_mgr_deinit(void)
{
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}

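/*
 * Tear down a buffer-table entry: release the kernel mapping (if any),
 * unmap it from the SMMU, drop the dma-buf reference and free the slot.
 */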
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped,",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	tbl.bufq[idx].vaddr = 0;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
		idx, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].is_internal = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}

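/* Userspace release path: validate the handle, then unmap and free it. */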
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle %d not matching within table %d, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}

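/*
 * Allocate an ION buffer for in-kernel users: map it into the kernel and
 * onto the given SMMU handle, then return the kva/iova pair along with a
 * mem handle that can later be freed with cam_mem_mgr_release_mem().
 */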
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uint32_t heap_id;
	int32_t ion_flag = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	if (inp->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);

	rc = cam_mem_util_get_dma_buf(inp->size,
		heap_id,
		ion_flag,
		&buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * we are mapping kva always here,
	 * update flags so that we do unmap properly
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);

int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);

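/*
 * Reserve the secondary-heap (SECHEAP) region on the given SMMU handle:
 * allocate a backing ION buffer, hand it to the SMMU driver and record it
 * so it can be freed later through cam_mem_mgr_free_memory_region().
 */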
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	uint32_t heap_id;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if (region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);
	rc = cam_mem_util_get_dma_buf(inp->size,
		heap_id,
		0,
		&buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
		buf,
		&iova,
		&request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = 0;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = 0;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_release_sec_heap(smmu_hdl);
smmu_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);

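/*
 * Release a secondary-heap reservation made by
 * cam_mem_mgr_reserve_memory_region() and free its buffer-table entry.
 */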
int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);