cam_mem_mgr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>
#endif

#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
#include "cam_trace.h"
#include "cam_common_util.h"

#define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024)
static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
static void cam_mem_mgr_put_dma_heaps(void);
static int cam_mem_mgr_get_dma_heaps(void);
#endif
static void cam_mem_mgr_print_tbl(void)
{
	int i;
	uint64_t ms, tmp, hrs, min, sec;
	struct timespec64 *ts = NULL;
	struct timespec64 current_ts;

	ktime_get_real_ts64(&(current_ts));
	tmp = current_ts.tv_sec;
	ms = (current_ts.tv_nsec) / 1000000;
	/* do_div() divides tmp in place and returns the remainder */
	sec = do_div(tmp, 60);
	min = do_div(tmp, 60);
	hrs = do_div(tmp, 24);

	CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***",
		hrs, min, sec, ms);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (tbl.bufq[i].active) {
			ts = &tbl.bufq[i].timestamp;
			tmp = ts->tv_sec;
			ms = (ts->tv_nsec) / 1000000;
			sec = do_div(tmp, 60);
			min = do_div(tmp, 60);
			hrs = do_div(tmp, 24);
			CAM_INFO(CAM_MEM,
				"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu",
				hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino,
				tbl.bufq[i].len);
		}
	}
}
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uintptr_t *vaddr,
	size_t *len)
{
	int rc = 0;
	void *addr;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called as a pair to avoid stability issues.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	addr = dma_buf_vmap(dmabuf);
	if (!addr) {
		CAM_ERR(CAM_MEM, "kernel map fail");
		*vaddr = 0;
		*len = 0;
		rc = -ENOSPC;
		goto fail;
	}

	*vaddr = (uint64_t)addr;
	*len = dmabuf->size;

	return 0;

fail:
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}
static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int rc = 0;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	dma_buf_vunmap(dmabuf, (void *)vaddr);

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called as a pair to avoid stability issues.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}

	return rc;
}
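
/*
 * Usage sketch (editor's illustration, not part of the driver): the two
 * helpers above must stay paired so that every dma_buf_begin_cpu_access()
 * is balanced by a dma_buf_end_cpu_access().
 *
 *	uintptr_t kva;
 *	size_t len;
 *
 *	if (!cam_mem_util_map_cpu_va(dmabuf, &kva, &len)) {
 *		memset((void *)kva, 0, len);
 *		cam_mem_util_unmap_cpu_va(dmabuf, kva);
 *	}
 */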
static int cam_mem_mgr_create_debug_fs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_memmgr", NULL);
	if (!dbgfileptr) {
		CAM_ERR(CAM_MEM, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}

	/* Store the parent dentry so deinit can clean up recursively */
	tbl.dentry = dbgfileptr;
	dbgfileptr = debugfs_create_bool("alloc_profile_enable", 0644,
		tbl.dentry, &tbl.alloc_profile_enable);
	if (IS_ERR(dbgfileptr)) {
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_MEM, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
	}
end:
	return rc;
}
int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;
	int rc = 0;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) {
		CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag");
		return -EINVAL;
	}

	tbl.need_shared_buffer_padding = cam_smmu_need_shared_buffer_padding();

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	rc = cam_mem_mgr_get_dma_heaps();
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in getting dma heaps rc=%d", rc);
		return rc;
	}
#endif
	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto put_heaps;
	}

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is an invalid handle index */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}

	mutex_init(&tbl.m_lock);
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);
	cam_mem_mgr_create_debug_fs();

	return 0;

put_heaps:
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	cam_mem_mgr_put_dma_heaps();
#endif
	return rc;
}
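
/*
 * Ordering sketch (editor's note): a caller is assumed to bring the
 * manager up once before any other entry point is used; every exported
 * function below checks cam_mem_mgr_state first.
 *
 *	rc = cam_mem_mgr_init();
 *	if (rc)
 *		return rc;
 *	...
 *	cam_mem_mgr_deinit();
 */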
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	ktime_get_real_ts64(&(tbl.bufq[idx].timestamp));
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}
static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].is_internal = false;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
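
/*
 * Slot accounting sketch (editor's note): slots come from a bitmap with
 * bit 0 permanently reserved, so a valid index is 1..CAM_MEM_BUFQ_MAX-1
 * and every successful cam_mem_get_slot() needs a matching put.
 *
 *	int32_t idx = cam_mem_get_slot();
 *
 *	if (idx < 0)
 *		return idx;
 *	...
 *	cam_mem_put_slot(idx);
 */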
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		return -EAGAIN;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
			iova_ptr, len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
			iova_ptr, len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d i_ino:%lu",
			buf_handle, mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino);
		goto handle_mismatch;
	}

	if (flags)
		*flags = tbl.bufq[idx].flags;

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d i_ino:%lu iova:0x%llx len:%zu",
		mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, *iova_ptr, *len_ptr);
handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);
int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		return -EPERM;
	}

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);
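
/*
 * Example (editor's sketch): fetching the kernel mapping of a buffer
 * that was allocated with CAM_MEM_FLAG_KMD_ACCESS; buf_handle is
 * assumed to come from a prior alloc/map call.
 *
 *	uintptr_t kva;
 *	size_t len;
 *
 *	if (!cam_mem_get_cpu_buf(buf_handle, &kva, &len))
 *		CAM_DBG(CAM_MEM, "kva=0x%lx len=%zu", kva, len);
 */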
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		rc = -EINVAL;
		goto end;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	CAM_DBG(CAM_MEM, "Calling dma-buf APIs for cache operations");
	cache_dir = DMA_BIDIRECTIONAL;
#else
	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}
#endif
	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
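
/*
 * Example (editor's sketch): issuing a clean+invalidate on a cached
 * buffer; the handle is assumed to come from userspace via the ioctl
 * layer.
 *
 *	struct cam_mem_cache_ops_cmd cache_cmd = {
 *		.buf_handle = buf_handle,
 *		.mem_cache_ops = CAM_MEM_CLEAN_INV_CACHE,
 *	};
 *
 *	rc = cam_mem_mgr_cache_ops(&cache_cmd);
 */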
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
#define CAM_MAX_VMIDS 4

static void cam_mem_mgr_put_dma_heaps(void)
{
	CAM_DBG(CAM_MEM, "Releasing DMA Buf heaps usage");
}
static int cam_mem_mgr_get_dma_heaps(void)
{
	int rc = 0;

	tbl.system_heap = NULL;
	tbl.system_uncached_heap = NULL;
	tbl.camera_heap = NULL;
	tbl.camera_uncached_heap = NULL;
	tbl.secure_display_heap = NULL;

	tbl.system_heap = dma_heap_find("qcom,system");
	if (IS_ERR_OR_NULL(tbl.system_heap)) {
		rc = PTR_ERR(tbl.system_heap);
		CAM_ERR(CAM_MEM, "qcom system heap not found, rc=%d", rc);
		tbl.system_heap = NULL;
		goto put_heaps;
	}

	tbl.system_uncached_heap = dma_heap_find("qcom,system-uncached");
	if (IS_ERR_OR_NULL(tbl.system_uncached_heap)) {
		if (tbl.force_cache_allocs) {
			/* optional, we anyway do not use uncached */
			CAM_DBG(CAM_MEM,
				"qcom system-uncached heap not found, err=%d",
				PTR_ERR(tbl.system_uncached_heap));
			tbl.system_uncached_heap = NULL;
		} else {
			/* fatal, uncached heaps are required */
			rc = PTR_ERR(tbl.system_uncached_heap);
			CAM_ERR(CAM_MEM,
				"qcom system-uncached heap not found, rc=%d",
				rc);
			tbl.system_uncached_heap = NULL;
			goto put_heaps;
		}
	}

	tbl.secure_display_heap = dma_heap_find("qcom,display");
	if (IS_ERR_OR_NULL(tbl.secure_display_heap)) {
		rc = PTR_ERR(tbl.secure_display_heap);
		CAM_ERR(CAM_MEM, "qcom,display heap not found, rc=%d",
			rc);
		tbl.secure_display_heap = NULL;
		goto put_heaps;
	}

	tbl.camera_heap = dma_heap_find("qcom,camera");
	if (IS_ERR_OR_NULL(tbl.camera_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%d",
			PTR_ERR(tbl.camera_heap));
		tbl.camera_heap = NULL;
	}

	tbl.camera_uncached_heap = dma_heap_find("qcom,camera-uncached");
	if (IS_ERR_OR_NULL(tbl.camera_uncached_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera-uncached heap not found, err=%d",
			PTR_ERR(tbl.camera_uncached_heap));
		tbl.camera_uncached_heap = NULL;
	}

	CAM_INFO(CAM_MEM,
		"Heaps : system=%pK, system_uncached=%pK, camera=%pK, camera-uncached=%pK, secure_display=%pK",
		tbl.system_heap, tbl.system_uncached_heap,
		tbl.camera_heap, tbl.camera_uncached_heap,
		tbl.secure_display_heap);

	return 0;
put_heaps:
	cam_mem_mgr_put_dma_heaps();
	return rc;
}
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf,
	unsigned long *i_ino)
{
	int rc = 0;
	struct dma_heap *heap;
	struct dma_heap *try_heap = NULL;
	struct timespec64 ts1, ts2;
	long microsec = 0;
	bool use_cached_heap = false;
	struct mem_buf_lend_kernel_arg arg;
	int vmids[CAM_MAX_VMIDS];
	int perms[CAM_MAX_VMIDS];
	int num_vmids = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_CACHE) ||
		(tbl.force_cache_allocs &&
		(!(cam_flags & CAM_MEM_FLAG_PROTECTED_MODE)))) {
		CAM_DBG(CAM_MEM,
			"Using CACHED heap, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		use_cached_heap = true;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		use_cached_heap = true;
		CAM_DBG(CAM_MEM,
			"Using CACHED heap for secure, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
	} else {
		use_cached_heap = false;
		CAM_ERR(CAM_MEM,
			"Using UNCACHED heap not supported, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		/*
		 * Needs better handling based on whether dma-buf-heaps support
		 * uncached heaps or not. For now, assume not supported.
		 */
		return -EINVAL;
	}

	if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap = tbl.secure_display_heap;
		vmids[num_vmids] = VMID_CP_CAMERA;
		perms[num_vmids] = PERM_READ | PERM_WRITE;
		num_vmids++;

		if (cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT) {
			CAM_DBG(CAM_MEM, "Secure mode CDSP flags");
			vmids[num_vmids] = VMID_CP_CDSP;
			perms[num_vmids] = PERM_READ | PERM_WRITE;
			num_vmids++;
		}
	} else if (cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL) {
		heap = tbl.secure_display_heap;
		vmids[num_vmids] = VMID_CP_NON_PIXEL;
		perms[num_vmids] = PERM_READ | PERM_WRITE;
		num_vmids++;
	} else if (use_cached_heap) {
		try_heap = tbl.camera_heap;
		heap = tbl.system_heap;
	} else {
		try_heap = tbl.camera_uncached_heap;
		heap = tbl.system_uncached_heap;
	}

	CAM_DBG(CAM_MEM, "Using heaps : try=%pK, heap=%pK", try_heap, heap);

	*buf = NULL;

	if (!try_heap && !heap) {
		CAM_ERR(CAM_MEM,
			"No heap available for allocation, can't allocate");
		return -EINVAL;
	}

	if (try_heap) {
		*buf = dma_heap_buffer_alloc(try_heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			CAM_WARN(CAM_MEM,
				"Failed in allocating from try heap, heap=%pK, len=%zu, err=%d",
				try_heap, len, PTR_ERR(*buf));
			*buf = NULL;
		}
	}

	if (*buf == NULL) {
		*buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			rc = PTR_ERR(*buf);
			CAM_ERR(CAM_MEM,
				"Failed in allocating from heap, heap=%pK, len=%zu, err=%d",
				heap, len, rc);
			*buf = NULL;
			return rc;
		}
	}

	*i_ino = file_inode((*buf)->file)->i_ino;

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) ||
		(cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL)) {
		if (num_vmids >= CAM_MAX_VMIDS) {
			CAM_ERR(CAM_MEM, "Insufficient array size for vmids %d", num_vmids);
			rc = -EINVAL;
			goto end;
		}

		arg.nr_acl_entries = num_vmids;
		arg.vmids = vmids;
		arg.perms = perms;

		rc = mem_buf_lend(*buf, &arg);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in buf lend rc=%d, buf=%pK, vmids [0]=0x%x, [1]=0x%x, [2]=0x%x",
				rc, *buf, vmids[0], vmids[1], vmids[2]);
			goto end;
		}
	}

	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK, i_ino=%lu", len, *buf, *i_ino);

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
end:
	dma_buf_put(*buf);
	return rc;
}
#else
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf,
	unsigned long *i_ino)
{
	int rc = 0;
	unsigned int heap_id;
	int32_t ion_flag = 0;
	struct timespec64 ts1, ts2;
	long microsec = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cam_flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	if (tbl.force_cache_allocs && (!(ion_flag & ION_FLAG_SECURE)))
		ion_flag |= ION_FLAG_CACHED;

	*buf = ion_alloc(len, heap_id, ion_flag);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	*i_ino = file_inode((*buf)->file)->i_ino;

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
}
#endif
static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
	struct dma_buf **dmabuf,
	int *fd,
	unsigned long *i_ino)
{
	int rc;
	struct dma_buf *temp_dmabuf = NULL;

	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf, i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Error allocating dma buf : len=%zu, flags=0x%x",
			len, flags);
		return rc;
	}

	*fd = dma_buf_fd(*dmabuf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d, i_ino=%lu",
		len, *dmabuf, *fd, *i_ino);

	/*
	 * Increment the ref count so that it becomes 2 here. When we
	 * close the fd, the refcount drops to 1, and when we later call
	 * dma_buf_put(), it drops to 0 and the memory is freed.
	 */
	temp_dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(temp_dmabuf)) {
		rc = PTR_ERR(temp_dmabuf);
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d, i_ino=%lu, rc=%d", *fd, *i_ino, rc);
		goto put_buf;
	}

	return rc;

put_buf:
	dma_buf_put(*dmabuf);
	return rc;
}
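
/*
 * Refcount lifecycle (editor's note): after cam_mem_util_buffer_alloc()
 * succeeds the dma-buf holds two references, one owned by the returned
 * fd and one taken explicitly via dma_buf_get(). Userspace closing the
 * fd drops the first; the final dma_buf_put() in the unmap path drops
 * the second and frees the memory.
 *
 *	rc = cam_mem_util_buffer_alloc(len, flags, &dmabuf, &fd, &i_ino);
 *	...
 *	dma_buf_put(dmabuf);	// on teardown
 */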
static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_EVA_NOPIXEL) &&
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE ||
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping and secure mode not allowed in no pixel mode");
		return -EINVAL;
	}

	return 0;
}
static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	struct dma_buf *dmabuf,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
	bool is_internal)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);
	bool dis_delayed_unmap = false;

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP)
		dis_delayed_unmap = true;

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	for (i = 0; i < num_hdls; i++) {
		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, hw_vaddr, len);
		else
			rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dmabuf, dis_delayed_unmap, dir,
				hw_vaddr, len, region, is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed %s map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
				(flags & CAM_MEM_FLAG_PROTECTED_MODE) ? "secured" : "",
				i, fd, dir, mmu_hdls[i], rc);
			goto multi_map_fail;
		}
	}

	return rc;

multi_map_fail:
	/* Roll back any mappings that already succeeded */
	for (--i; i >= 0; i--) {
		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dmabuf);
		else
			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, dmabuf, CAM_SMMU_REGION_IO);
	}

	return rc;
}
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;
	unsigned long i_ino = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	len = cmd->len;

	if (tbl.need_shared_buffer_padding &&
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)) {
		len += CAM_MEM_SHARED_BUFFER_PAD_4K;
		CAM_DBG(CAM_MEM, "Pad 4k size, actual %llu, allocating %zu",
			cmd->len, len);
	}

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%zu, align=%llu, flags=0x%x, num_hdl=%d",
			len, cmd->align, cmd->flags, cmd->num_hdl);
		cam_mem_mgr_print_tbl();
		return rc;
	}
	if (!dmabuf) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc returned NULL dmabuf! fd=%d, i_ino=%lu, len=%zu", fd, i_ino, len);
		cam_mem_mgr_print_tbl();
		return -EINVAL;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
			(cam_smmu_is_expanded_memory() && cmd->flags & CAM_MEM_FLAG_CMD_BUF_TYPE))
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_IO;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			dmabuf,
			&hw_vaddr,
			&len,
			region,
			true);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va len=%zu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				len, cmd->flags,
				fd, region, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len)
					rc = -EBADR;
				cam_mem_mgr_print_tbl();
			}
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	tbl.bufq[idx].is_internal = true;
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	if (cmd->flags & CAM_MEM_FLAG_KMD_DEBUG_FLAG)
		tbl.dbg_buf_idx = idx;

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
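
/*
 * Example (editor's sketch): a minimal allocation request as the ioctl
 * handler might build it. iommu_hdl is assumed to have been obtained
 * earlier from the SMMU layer (e.g. cam_smmu_get_handle()).
 *
 *	struct cam_mem_mgr_alloc_cmd alloc_cmd = {
 *		.len = SZ_4K,
 *		.flags = CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_CACHE,
 *		.num_hdl = 1,
 *	};
 *
 *	alloc_cmd.mmu_hdls[0] = iommu_hdl;
 *	rc = cam_mem_mgr_alloc_and_map(&alloc_cmd);
 *	// on success alloc_cmd.out.buf_handle and alloc_cmd.out.fd are valid
 */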
static bool cam_mem_util_is_map_internal(int32_t fd, unsigned long i_ino)
{
	uint32_t i;
	bool is_internal = false;

	mutex_lock(&tbl.m_lock);
	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
		if ((tbl.bufq[i].fd == fd) && (tbl.bufq[i].i_ino == i_ino)) {
			is_internal = tbl.bufq[i].is_internal;
			break;
		}
	}
	mutex_unlock(&tbl.m_lock);

	return is_internal;
}
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;
	bool is_internal = false;
	unsigned long i_ino;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	i_ino = file_inode(dmabuf->file)->i_ino;

	is_internal = cam_mem_util_is_map_internal(cmd->fd, i_ino);

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d, fd=%d",
			idx, cmd->fd);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			dmabuf,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
			is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, len=%zu, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, len,
				CAM_SMMU_REGION_IO, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len) {
					rc = -EBADR;
					cam_mem_mgr_print_tbl();
				}
			}
			goto map_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	tbl.bufq[idx].is_internal = is_internal;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;
	cmd->out.size = (uint32_t)len;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);

	return rc;

map_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
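
/*
 * Example (editor's sketch): importing an externally allocated dma-buf
 * fd and mapping it to one IOMMU handle. ext_fd and iommu_hdl are
 * hypothetical names for values owned by the caller.
 *
 *	struct cam_mem_mgr_map_cmd map_cmd = {
 *		.fd = ext_fd,
 *		.flags = CAM_MEM_FLAG_HW_READ_WRITE,
 *		.num_hdl = 1,
 *	};
 *
 *	map_cmd.mmu_hdls[0] = iommu_hdl;
 *	rc = cam_mem_mgr_map(&map_cmd);
 */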
static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	struct dma_buf *dma_buf;
	unsigned long i_ino;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;
	dma_buf = tbl.bufq[idx].dma_buf;
	i_ino = tbl.bufq[idx].i_ino;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%x, i_ino=%lu flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, i_ino, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dma_buf);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, rc=%d",
					i, fd, i_ino, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, dma_buf, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, i_ino, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}
static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
			tbl.bufq[idx].kmdvaddr);
}
static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_MEM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].i_ino = 0;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		tbl.bufq[i].is_internal = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is an invalid handle index */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}
void cam_mem_mgr_deinit(void)
{
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	debugfs_remove_recursive(tbl.dentry);
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	tbl.dbg_buf_idx = -1;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	/* Deactivate the buffer queue to prevent multiple unmap */
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].vaddr = 0;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		/*
		 * Workaround: the smmu driver does put_buf without a
		 * matching get_buf for kernel mappings. Set NULL here so
		 * that we don't call dma_buf_put() again below.
		 */
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK, i_ino %lu",
		idx, tbl.bufq[idx].fd, tbl.bufq[idx].is_imported, tbl.bufq[idx].dma_buf,
		tbl.bufq[idx].i_ino);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = 0;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].is_internal = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle %d not matching within table %d, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	unsigned long i_ino = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, &buf, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * We always map the kernel virtual address here, so update the
	 * flags to make sure the unmap path tears it down properly.
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;

	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	CAM_DBG(CAM_MEM, "idx=%d, dmabuf=%pK, i_ino=%lu, flags=0x%x, mem_handle=0x%x",
		idx, buf, i_ino, inp->flags, mem_handle);

	return rc;

slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);
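
/*
 * Example (editor's sketch): a kernel client requesting a shared command
 * buffer and releasing it again. smmu_hdl is assumed to come from the
 * SMMU layer; kva/iova come back in the out descriptor.
 *
 *	struct cam_mem_mgr_request_desc req = {
 *		.size = SZ_4K,
 *		.smmu_hdl = smmu_hdl,
 *		.flags = CAM_MEM_FLAG_HW_SHARED_ACCESS | CAM_MEM_FLAG_CACHE,
 *	};
 *	struct cam_mem_mgr_memory_desc mem;
 *
 *	if (!cam_mem_mgr_request_mem(&req, &mem))
 *		cam_mem_mgr_release_mem(&mem);
 */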
int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	uintptr_t kvaddr = 0;
	unsigned long i_ino = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if ((region != CAM_SMMU_REGION_SECHEAP) &&
		(region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM, "Only secondary heap and FW uncached regions supported");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, 0, &buf, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	if (inp->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
		if (rc) {
			CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
			goto kmap_fail;
		}
	}

	rc = cam_smmu_reserve_buf_region(region,
		inp->smmu_hdl, buf, &iova, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_release_buf_region(region, smmu_hdl);
smmu_fail:
	if (region == CAM_SMMU_REGION_FWUNCACHED)
		cam_mem_util_unmap_cpu_va(buf, kvaddr);
kmap_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if ((inp->region != CAM_SMMU_REGION_SECHEAP) &&
		(inp->region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM, "Only secondary heap and FW uncached regions supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_buf_region(inp->region, inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
#ifndef CONFIG_CAM_PRESIL
/* Stub implementations when pre-silicon support is not compiled in */
struct dma_buf *cam_mem_mgr_get_dma_buf(int fd)
{
	return NULL;
}

int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl)
{
	return 0;
}

int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle)
{
	return 0;
}

int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle,
	uint32_t buf_size,
	uint32_t offset,
	int32_t iommu_hdl)
{
	return 0;
}
#endif