cam_mem_mgr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/debugfs.h>

#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
#include "cam_trace.h"
#include "cam_common_util.h"

#define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024)

static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
static void cam_mem_mgr_put_dma_heaps(void);
static int cam_mem_mgr_get_dma_heaps(void);
#endif

static void cam_mem_mgr_print_tbl(void)
{
	int i;
	uint64_t ms, tmp, hrs, min, sec;
	struct timespec64 *ts = NULL;
	struct timespec64 current_ts;

	ktime_get_real_ts64(&(current_ts));
	tmp = current_ts.tv_sec;
	ms = (current_ts.tv_nsec) / 1000000;
	sec = do_div(tmp, 60);
	min = do_div(tmp, 60);
	hrs = do_div(tmp, 24);

	CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***",
		hrs, min, sec, ms);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (tbl.bufq[i].active) {
			ts = &tbl.bufq[i].timestamp;
			tmp = ts->tv_sec;
			ms = (ts->tv_nsec) / 1000000;
			sec = do_div(tmp, 60);
			min = do_div(tmp, 60);
			hrs = do_div(tmp, 24);
			CAM_INFO(CAM_MEM,
				"%llu:%llu:%llu:%llu idx %d fd %d size %llu",
				hrs, min, sec, ms, i, tbl.bufq[i].fd,
				tbl.bufq[i].len);
		}
	}
}
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uintptr_t *vaddr,
	size_t *len)
{
	int rc = 0;
	void *addr;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pairs to avoid stability issues.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	addr = dma_buf_vmap(dmabuf);
	if (!addr) {
		CAM_ERR(CAM_MEM, "kernel map fail");
		*vaddr = 0;
		*len = 0;
		rc = -ENOSPC;
		goto fail;
	}

	*vaddr = (uintptr_t)addr;
	*len = dmabuf->size;

	return 0;

fail:
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}
static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int rc = 0;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	dma_buf_vunmap(dmabuf, (void *)vaddr);

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pairs to avoid stability issues.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}

	return rc;
}
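
/*
 * Illustrative sketch (not part of the original driver): the two helpers
 * above are meant to be used as a strict pair around any kernel CPU access
 * to a dma-buf. The caller and buffer below are hypothetical.
 */
#if 0
static int cam_mem_example_cpu_touch(struct dma_buf *dmabuf)
{
	uintptr_t kva = 0;
	size_t len = 0;
	int rc;

	rc = cam_mem_util_map_cpu_va(dmabuf, &kva, &len);
	if (rc)
		return rc;

	/* CPU reads/writes of kva[0..len) go here */

	return cam_mem_util_unmap_cpu_va(dmabuf, (uint64_t)kva);
}
#endif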
static int cam_mem_mgr_create_debug_fs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_memmgr", NULL);
	if (!dbgfileptr) {
		CAM_ERR(CAM_MEM, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}
	/* Store the parent dentry so it can be removed at deinit */
	tbl.dentry = dbgfileptr;

	dbgfileptr = debugfs_create_bool("alloc_profile_enable", 0644,
		tbl.dentry, &tbl.alloc_profile_enable);
	if (IS_ERR(dbgfileptr)) {
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_MEM, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
	}
end:
	return rc;
}
int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;
	int rc = 0;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) {
		CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag");
		return -EINVAL;
	}

	tbl.need_shared_buffer_padding = cam_smmu_need_shared_buffer_padding();

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	rc = cam_mem_mgr_get_dma_heaps();
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in getting dma heaps rc=%d", rc);
		return rc;
	}
#endif
	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto put_heaps;
	}

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}
	mutex_init(&tbl.m_lock);

	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);

	cam_mem_mgr_create_debug_fs();

	return 0;
put_heaps:
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	cam_mem_mgr_put_dma_heaps();
#endif
	return rc;
}
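
/*
 * Illustrative sketch (not in the original file): cam_mem_mgr_init() and
 * cam_mem_mgr_deinit() are expected to be called once per driver lifetime,
 * e.g. from the camera request manager's setup/teardown path. The caller
 * names below are hypothetical.
 */
#if 0
static int cam_example_core_setup(void)
{
	int rc;

	rc = cam_mem_mgr_init();
	if (rc) {
		CAM_ERR(CAM_MEM, "mem mgr init failed rc=%d", rc);
		return rc;
	}

	/* ... register subdevices, etc. ... */
	return 0;
}

static void cam_example_core_teardown(void)
{
	cam_mem_mgr_deinit();
}
#endif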
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	ktime_get_real_ts64(&(tbl.bufq[idx].timestamp));
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].is_internal = false;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		return -EAGAIN;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
			buf_handle, mmu_handle, tbl.bufq[idx].fd);
		goto handle_mismatch;
	}

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d iova_ptr:%pK len_ptr:%llu",
		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);

handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);
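
/*
 * Illustrative sketch (not in the original file): a typical consumer of
 * cam_mem_get_io_buf() translates a UMD buffer handle into a device IOVA
 * before programming hardware. The handle arguments are hypothetical.
 */
#if 0
static int cam_example_fill_hw_addr(int32_t buf_handle, int32_t iommu_hdl,
	uint64_t *hw_addr)
{
	dma_addr_t iova = 0;
	size_t len = 0;
	int rc;

	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &iova, &len);
	if (rc) {
		CAM_ERR(CAM_MEM, "io buf fetch failed rc=%d", rc);
		return rc;
	}

	*hw_addr = (uint64_t)iova;
	return 0;
}
#endif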
int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		return -EPERM;
	}

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);
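
/*
 * Illustrative sketch (not in the original file): cam_mem_get_cpu_buf()
 * only succeeds for buffers allocated with CAM_MEM_FLAG_KMD_ACCESS, since
 * only those carry a kernel mapping. The handle and command word below are
 * hypothetical.
 */
#if 0
static int cam_example_patch_cmd_buf(int32_t cmd_buf_handle)
{
	uintptr_t kva = 0;
	size_t len = 0;
	uint32_t *cmd;
	int rc;

	rc = cam_mem_get_cpu_buf(cmd_buf_handle, &kva, &len);
	if (rc || len < sizeof(uint32_t))
		return rc ? rc : -EINVAL;

	cmd = (uint32_t *)kva;
	cmd[0] = 0xdeadbeef;	/* hypothetical command word */
	return 0;
}
#endif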
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		rc = -EINVAL;
		goto end;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	CAM_DBG(CAM_MEM, "Calling dma buf APIs for cache operations");
	cache_dir = DMA_BIDIRECTIONAL;
#else
	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}
#endif
	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
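
/*
 * Illustrative sketch (not in the original file): a caller flushes a cached
 * buffer to the device by filling a cam_mem_cache_ops_cmd. The handle value
 * is hypothetical.
 */
#if 0
static int cam_example_clean_cache(int32_t buf_handle)
{
	struct cam_mem_cache_ops_cmd cache_cmd = {
		.buf_handle = buf_handle,
		.mem_cache_ops = CAM_MEM_CLEAN_INV_CACHE,
	};

	return cam_mem_mgr_cache_ops(&cache_cmd);
}
#endif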
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
static void cam_mem_mgr_put_dma_heaps(void)
{
	CAM_DBG(CAM_MEM, "Releasing DMA Buf heaps usage");
}

static int cam_mem_mgr_get_dma_heaps(void)
{
	int rc = 0;

	tbl.system_heap = NULL;
	tbl.system_uncached_heap = NULL;
	tbl.camera_heap = NULL;
	tbl.camera_uncached_heap = NULL;
	tbl.secure_display_heap = NULL;

	tbl.system_heap = dma_heap_find("qcom,system");
	if (IS_ERR_OR_NULL(tbl.system_heap)) {
		rc = PTR_ERR(tbl.system_heap);
		CAM_ERR(CAM_MEM, "qcom system heap not found, rc=%d", rc);
		tbl.system_heap = NULL;
		goto put_heaps;
	}

	tbl.system_uncached_heap = dma_heap_find("qcom,system-uncached");
	if (IS_ERR_OR_NULL(tbl.system_uncached_heap)) {
		if (tbl.force_cache_allocs) {
			/* optional, we anyway do not use uncached */
			CAM_DBG(CAM_MEM,
				"qcom system-uncached heap not found, err=%d",
				PTR_ERR(tbl.system_uncached_heap));
			tbl.system_uncached_heap = NULL;
		} else {
			/* fatal, must need uncached heaps */
			rc = PTR_ERR(tbl.system_uncached_heap);
			CAM_ERR(CAM_MEM,
				"qcom system-uncached heap not found, rc=%d",
				rc);
			tbl.system_uncached_heap = NULL;
			goto put_heaps;
		}
	}

	tbl.secure_display_heap = dma_heap_find("qcom,secure-display");
	if (IS_ERR_OR_NULL(tbl.secure_display_heap)) {
		rc = PTR_ERR(tbl.secure_display_heap);
		CAM_ERR(CAM_MEM, "qcom,secure-display heap not found, rc=%d",
			rc);
		tbl.secure_display_heap = NULL;
		goto put_heaps;
	}

	tbl.camera_heap = dma_heap_find("qcom,camera");
	if (IS_ERR_OR_NULL(tbl.camera_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%d",
			PTR_ERR(tbl.camera_heap));
		tbl.camera_heap = NULL;
	}

	tbl.camera_uncached_heap = dma_heap_find("qcom,camera-uncached");
	if (IS_ERR_OR_NULL(tbl.camera_uncached_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera-uncached heap not found, err=%d",
			PTR_ERR(tbl.camera_uncached_heap));
		tbl.camera_uncached_heap = NULL;
	}

	CAM_INFO(CAM_MEM,
		"Heaps : system=%pK, system_uncached=%pK, camera=%pK, camera-uncached=%pK, secure_display=%pK",
		tbl.system_heap, tbl.system_uncached_heap,
		tbl.camera_heap, tbl.camera_uncached_heap,
		tbl.secure_display_heap);

	return 0;
put_heaps:
	cam_mem_mgr_put_dma_heaps();
	return rc;
}
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf)
{
	int rc = 0;
	struct dma_heap *heap;
	struct dma_heap *try_heap = NULL;
	struct timespec64 ts1, ts2;
	long microsec = 0;
	bool use_cached_heap = false;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_CACHE) ||
		(tbl.force_cache_allocs &&
		(!(cam_flags & CAM_MEM_FLAG_PROTECTED_MODE)))) {
		CAM_DBG(CAM_MEM,
			"Using CACHED heap, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		use_cached_heap = true;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		use_cached_heap = true;
		CAM_DBG(CAM_MEM,
			"Using CACHED heap for secure, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
	} else {
		use_cached_heap = false;
		CAM_ERR(CAM_MEM,
			"Using UNCACHED heap not supported, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		/*
		 * Needs better handling based on whether dma-buf heaps
		 * support uncached heaps or not. For now, assume not
		 * supported.
		 */
		return -EINVAL;
	}

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap = tbl.secure_display_heap;
		CAM_ERR(CAM_MEM, "Secure CDSP not supported yet");
		return -EBADR;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap = tbl.secure_display_heap;
		CAM_ERR(CAM_MEM, "Secure mode not supported yet");
		return -EBADR;
	}

	if (use_cached_heap) {
		try_heap = tbl.camera_heap;
		heap = tbl.system_heap;
	} else {
		try_heap = tbl.camera_uncached_heap;
		heap = tbl.system_uncached_heap;
	}

	CAM_DBG(CAM_MEM, "Using heaps : try=%pK, heap=%pK", try_heap, heap);

	*buf = NULL;

	if (!try_heap && !heap) {
		CAM_ERR(CAM_MEM,
			"No heap available for allocation, can't allocate");
		return -EINVAL;
	}

	if (try_heap) {
		*buf = dma_heap_buffer_alloc(try_heap, len, O_RDWR, 0);
		if (IS_ERR_OR_NULL(*buf)) {
			CAM_WARN(CAM_MEM,
				"Failed in allocating from try heap, heap=%pK, len=%zu, err=%d",
				try_heap, len, PTR_ERR(*buf));
			*buf = NULL;
		}
	}

	if (*buf == NULL) {
		*buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
		if (IS_ERR_OR_NULL(*buf)) {
			rc = PTR_ERR(*buf);
			CAM_ERR(CAM_MEM,
				"Failed in allocating from heap, heap=%pK, len=%zu, err=%d",
				heap, len, rc);
			*buf = NULL;
			return rc;
		}
	}

	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK", len, *buf);

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
}
#else
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf)
{
	int rc = 0;
	unsigned int heap_id;
	int32_t ion_flag = 0;
	struct timespec64 ts1, ts2;
	long microsec = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cam_flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	if (tbl.force_cache_allocs && (!(ion_flag & ION_FLAG_SECURE)))
		ion_flag |= ION_FLAG_CACHED;

	*buf = ion_alloc(len, heap_id, ion_flag);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
}
#endif
static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
	struct dma_buf **dmabuf,
	int *fd)
{
	int rc;
	struct dma_buf *temp_dmabuf = NULL;

	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Error allocating dma buf : len=%llu, flags=0x%x",
			len, flags);
		return rc;
	}

	*fd = dma_buf_fd(*dmabuf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d",
		len, *dmabuf, *fd);

	/*
	 * Increment the refcount so that it becomes 2 here. When userspace
	 * closes the fd, the refcount drops to 1, and when dma_buf_put() is
	 * called, it drops to 0 and the memory is freed.
	 */
	temp_dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(temp_dmabuf)) {
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	return rc;

put_buf:
	dma_buf_put(*dmabuf);
	return rc;
}
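
/*
 * Refcount sketch (illustrative, not in the original file): after
 * cam_mem_util_buffer_alloc() succeeds, the dma-buf holds two references,
 * so both the fd close from userspace and the driver's final dma_buf_put()
 * are needed before the memory returns to the heap.
 *
 *	dma_heap_buffer_alloc()/ion_alloc()  -> refcount = 1
 *	dma_buf_get(fd)                      -> refcount = 2
 *	close(fd) in userspace               -> refcount = 1
 *	dma_buf_put() on release             -> refcount = 0, buffer freed
 */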
static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
	bool is_internal)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);
	bool dis_delayed_unmap = false;

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP)
		dis_delayed_unmap = true;

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
				hw_vaddr,
				len);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], rc);
				goto multi_map_fail;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_user_iova(mmu_hdls[i],
				fd,
				dis_delayed_unmap,
				dir,
				(dma_addr_t *)hw_vaddr,
				len,
				region,
				is_internal);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], region, rc);
				goto multi_map_fail;
			}
		}
	}

	return rc;
multi_map_fail:
	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		for (--i; i >= 0; i--)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
	else
		for (--i; i >= 0; i--)
			cam_smmu_unmap_user_iova(mmu_hdls[i],
				fd,
				CAM_SMMU_REGION_IO);
	return rc;
}
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	len = cmd->len;

	if (tbl.need_shared_buffer_padding &&
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)) {
		len += CAM_MEM_SHARED_BUFFER_PAD_4K;
		CAM_DBG(CAM_MEM, "Pad 4k size, actual %llu, allocating %zu",
			cmd->len, len);
	}

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
			len, cmd->align, cmd->flags, cmd->num_hdl);
		cam_mem_mgr_print_tbl();
		return rc;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_SECHEAP;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			&hw_vaddr,
			&len,
			region,
			true);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va len=%llu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				len, cmd->flags,
				fd, region, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len)
					rc = -EBADR;
				cam_mem_mgr_print_tbl();
			}
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	tbl.bufq[idx].is_internal = true;
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	if (cmd->flags & CAM_MEM_FLAG_KMD_DEBUG_FLAG)
		tbl.dbg_buf_idx = idx;

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
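
/*
 * Illustrative sketch (not in the original file): cam_mem_mgr_alloc_and_map()
 * is normally reached from userspace through the request manager's buffer
 * ioctls, but the command layout can also be exercised directly. The MMU
 * handle and helper name below are hypothetical.
 */
#if 0
static int cam_example_alloc_io_buf(int32_t iommu_hdl, size_t size,
	int32_t *out_handle)
{
	struct cam_mem_mgr_alloc_cmd alloc_cmd = {0};
	int rc;

	alloc_cmd.len = size;
	alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_CACHE;
	alloc_cmd.num_hdl = 1;
	alloc_cmd.mmu_hdls[0] = iommu_hdl;

	rc = cam_mem_mgr_alloc_and_map(&alloc_cmd);
	if (rc)
		return rc;

	*out_handle = alloc_cmd.out.buf_handle;
	return 0;
}
#endif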
static bool cam_mem_util_is_map_internal(int32_t fd)
{
	uint32_t i;
	bool is_internal = false;

	mutex_lock(&tbl.m_lock);
	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
		if (tbl.bufq[i].fd == fd) {
			is_internal = tbl.bufq[i].is_internal;
			break;
		}
	}
	mutex_unlock(&tbl.m_lock);

	return is_internal;
}
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;
	bool is_internal = false;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	is_internal = cam_mem_util_is_map_internal(cmd->fd);

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d, fd=%d",
			idx, cmd->fd);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
			is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, len=%llu, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, len,
				CAM_SMMU_REGION_IO, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len) {
					rc = -EBADR;
					cam_mem_mgr_print_tbl();
				}
			}
			goto map_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	tbl.bufq[idx].is_internal = is_internal;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;
	cmd->out.size = (uint32_t)len;
	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
					i, fd, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}
static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
			tbl.bufq[idx].kmdvaddr);
}

static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_MEM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		tbl.bufq[i].is_internal = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}
void cam_mem_mgr_deinit(void)
{
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	debugfs_remove_recursive(tbl.dentry);
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	tbl.dbg_buf_idx = -1;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	/* Deactivate the buffer queue to prevent multiple unmap */
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].vaddr = 0;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
		idx, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].is_internal = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle %d not matching within table %d, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size,
		inp->flags,
		&buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * The KVA is always mapped here; update the flags so that the
	 * unmap path tears down the kernel mapping as well.
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;
slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);
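
/*
 * Illustrative sketch (not in the original file): a kernel-side client would
 * request a HW-visible buffer with a kernel mapping and release it when done.
 * The SMMU handle and buffer size below are hypothetical.
 */
#if 0
static int cam_example_kernel_buf(int32_t smmu_hdl)
{
	struct cam_mem_mgr_request_desc req = {
		.size = PAGE_SIZE,
		.smmu_hdl = smmu_hdl,
		.flags = CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_CACHE,
	};
	struct cam_mem_mgr_memory_desc mem = {0};
	int rc;

	rc = cam_mem_mgr_request_mem(&req, &mem);
	if (rc)
		return rc;

	/* mem.kva is CPU-accessible, mem.iova is what the HW sees */

	return cam_mem_mgr_release_mem(&mem);
}
#endif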
int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if (region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size,
		0,
		&buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
		buf,
		&iova,
		&request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = 0;

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = 0;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_release_sec_heap(smmu_hdl);
smmu_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
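
/*
 * Illustrative sketch (not in the original file): the reserve/free pair
 * above backs the CAM_SMMU_REGION_SECHEAP region during SMMU setup and
 * teardown. The SMMU handle, size and helper names below are hypothetical.
 */
#if 0
static int cam_example_setup_secheap(int32_t smmu_hdl,
	struct cam_mem_mgr_memory_desc *secheap_mem)
{
	struct cam_mem_mgr_request_desc req = {
		.size = 0x100000,	/* hypothetical 1 MB secondary heap */
		.smmu_hdl = smmu_hdl,
	};

	return cam_mem_mgr_reserve_memory_region(&req,
		CAM_SMMU_REGION_SECHEAP, secheap_mem);
}

static void cam_example_teardown_secheap(
	struct cam_mem_mgr_memory_desc *secheap_mem)
{
	cam_mem_mgr_free_memory_region(secheap_mem);
}
#endif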