// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>
#endif
#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
#include "cam_trace.h"
#include "cam_common_util.h"
#include "cam_presil_hw_access.h"

#define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024)

static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

/* cam_mem_mgr_debug - global struct to keep track of debug settings for mem mgr
 *
 * @dentry               : Directory entry to the mem mgr root folder
 * @alloc_profile_enable : Whether to enable alloc profiling
 */
static struct {
	struct dentry *dentry;
	bool alloc_profile_enable;
} g_cam_mem_mgr_debug;

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
static void cam_mem_mgr_put_dma_heaps(void);
static int cam_mem_mgr_get_dma_heaps(void);
#endif

#ifdef CONFIG_CAM_PRESIL
static inline void cam_mem_mgr_reset_presil_params(int idx)
{
	tbl.bufq[idx].presil_params.fd_for_umd_daemon = -1;
	tbl.bufq[idx].presil_params.refcount = 0;
}
#else
static inline void cam_mem_mgr_reset_presil_params(int idx)
{
}
#endif

static unsigned long cam_mem_mgr_mini_dump_cb(void *dst, unsigned long len)
{
	struct cam_mem_table_mini_dump *md;

	if (!dst) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return 0;
	}

	if (len < sizeof(*md)) {
		CAM_ERR(CAM_MEM, "Insufficient length %lu", len);
		return 0;
	}

	md = (struct cam_mem_table_mini_dump *)dst;
	memcpy(md->bufq, tbl.bufq, CAM_MEM_BUFQ_MAX * sizeof(struct cam_mem_buf_queue));
	md->dbg_buf_idx = tbl.dbg_buf_idx;
	md->alloc_profile_enable = g_cam_mem_mgr_debug.alloc_profile_enable;
	md->force_cache_allocs = tbl.force_cache_allocs;
	md->need_shared_buffer_padding = tbl.need_shared_buffer_padding;
	return sizeof(*md);
}

static void cam_mem_mgr_print_tbl(void)
{
	int i;
	uint64_t ms, hrs, min, sec;
	struct timespec64 current_ts;

	CAM_GET_TIMESTAMP(current_ts);
	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
	CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***",
		hrs, min, sec, ms);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (tbl.bufq[i].active) {
			CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[i].timestamp), hrs, min, sec, ms);
			CAM_INFO(CAM_MEM,
				"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu",
				hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino,
				tbl.bufq[i].len);
		}
	}
}

static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;
	return rc;
}

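/*
 * Example (illustrative only, not driver code): a HW read-only buffer maps
 * as DMA_TO_DEVICE, since the device only consumes CPU-produced data:
 *
 *     int dir = cam_mem_util_get_dma_dir(CAM_MEM_FLAG_HW_READ_ONLY);
 *     // dir == DMA_TO_DEVICE
 */
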
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf, uintptr_t *vaddr, size_t *len)
{
	int rc = 0;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pairs to avoid stability issues.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	rc = cam_compat_util_get_dmabuf_va(dmabuf, vaddr);
	if (rc) {
		CAM_ERR(CAM_MEM, "kernel vmap failed: rc = %d", rc);
		*len = 0;
		dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	} else {
		*len = dmabuf->size;
		CAM_DBG(CAM_MEM, "vaddr = 0x%lx, len = %zu", (unsigned long)*vaddr, *len);
	}
	return rc;
}

static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int rc = 0;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	cam_compat_util_put_dmabuf_va(dmabuf, (void *)vaddr);

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pairs to avoid stability issues.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}
	return rc;
}

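/*
 * Illustrative pairing of the two helpers above (hypothetical caller, not
 * part of this driver): every successful map must be balanced by an unmap
 * so the begin/end CPU-access calls stay paired:
 *
 *     uintptr_t kva;
 *     size_t len;
 *
 *     if (!cam_mem_util_map_cpu_va(dmabuf, &kva, &len)) {
 *         memset((void *)kva, 0, len);            // CPU access while mapped
 *         cam_mem_util_unmap_cpu_va(dmabuf, kva); // ends CPU access
 *     }
 */
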
static int cam_mem_mgr_create_debug_fs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available() || g_cam_mem_mgr_debug.dentry)
		return 0;

	rc = cam_debugfs_create_subdir("memmgr", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_MEM, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}

	g_cam_mem_mgr_debug.dentry = dbgfileptr;
	debugfs_create_bool("alloc_profile_enable", 0644, g_cam_mem_mgr_debug.dentry,
		&g_cam_mem_mgr_debug.alloc_profile_enable);
end:
	return rc;
}

int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;
	int rc = 0;

	if (atomic_read(&cam_mem_mgr_state))
		return 0;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) {
		CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag");
		return -EINVAL;
	}

	tbl.need_shared_buffer_padding = cam_smmu_need_shared_buffer_padding();

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	rc = cam_mem_mgr_get_dma_heaps();
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in getting dma heaps rc=%d", rc);
		return rc;
	}
#endif
	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto put_heaps;
	}

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
		cam_mem_mgr_reset_presil_params(i);
	}

	mutex_init(&tbl.m_lock);
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);

	cam_mem_mgr_create_debug_fs();
	cam_common_register_mini_dump_cb(cam_mem_mgr_mini_dump_cb,
		"cam_mem");

	return 0;
put_heaps:
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	cam_mem_mgr_put_dma_heaps();
#endif
	return rc;
}

static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	CAM_GET_TIMESTAMP((tbl.bufq[idx].timestamp));
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].is_internal = false;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}

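/*
 * Slot bookkeeping sketch (illustrative): the bitmap acts as a free list,
 * with bit 0 permanently reserved so a handle index of 0 stays invalid:
 *
 *     int32_t idx = cam_mem_get_slot();   // claims first free bit, inits lock
 *     if (idx > 0)
 *         cam_mem_put_slot(idx);          // clears the bit, destroys lock
 */
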
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		return -EAGAIN;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
			iova_ptr, len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
			iova_ptr, len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d i_ino:%lu",
			buf_handle, mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino);
		goto handle_mismatch;
	}

	if (flags)
		*flags = tbl.bufq[idx].flags;

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d i_ino:%lu iova_ptr:0x%lx len_ptr:%lu",
		mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, *iova_ptr, *len_ptr);
handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);

int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		return -EPERM;
	}

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);

int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.m_lock);

	if (!test_bit(idx, tbl.bitmap)) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		mutex_unlock(&tbl.m_lock);
		return -EINVAL;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	CAM_DBG(CAM_MEM, "Calling dma_buf APIs for cache operations");
	cache_dir = DMA_BIDIRECTIONAL;
#else
	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}
#endif
	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);

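/*
 * Example (illustrative, hypothetical handle): flushing CPU writes before
 * the HW reads a cached buffer uses CAM_MEM_CLEAN_CACHE, which resolves to
 * DMA_TO_DEVICE on non-DMABUF-heap builds per the switch above:
 *
 *     struct cam_mem_cache_ops_cmd cmd = {
 *         .buf_handle    = buf_handle,
 *         .mem_cache_ops = CAM_MEM_CLEAN_CACHE,
 *     };
 *     rc = cam_mem_mgr_cache_ops(&cmd);
 */
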
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)

#define CAM_MAX_VMIDS 4

static void cam_mem_mgr_put_dma_heaps(void)
{
	CAM_DBG(CAM_MEM, "Releasing DMA Buf heaps usage");
}

static int cam_mem_mgr_get_dma_heaps(void)
{
	int rc = 0;

	tbl.system_heap = NULL;
	tbl.system_uncached_heap = NULL;
	tbl.camera_heap = NULL;
	tbl.camera_uncached_heap = NULL;
	tbl.secure_display_heap = NULL;

	tbl.system_heap = dma_heap_find("qcom,system");
	if (IS_ERR_OR_NULL(tbl.system_heap)) {
		rc = PTR_ERR(tbl.system_heap);
		CAM_ERR(CAM_MEM, "qcom system heap not found, rc=%d", rc);
		tbl.system_heap = NULL;
		goto put_heaps;
	}

	tbl.system_uncached_heap = dma_heap_find("qcom,system-uncached");
	if (IS_ERR_OR_NULL(tbl.system_uncached_heap)) {
		if (tbl.force_cache_allocs) {
			/* optional, we anyway do not use uncached */
			CAM_DBG(CAM_MEM,
				"qcom system-uncached heap not found, err=%ld",
				PTR_ERR(tbl.system_uncached_heap));
			tbl.system_uncached_heap = NULL;
		} else {
			/* fatal, must need uncached heaps */
			rc = PTR_ERR(tbl.system_uncached_heap);
			CAM_ERR(CAM_MEM,
				"qcom system-uncached heap not found, rc=%d",
				rc);
			tbl.system_uncached_heap = NULL;
			goto put_heaps;
		}
	}

	tbl.secure_display_heap = dma_heap_find("qcom,display");
	if (IS_ERR_OR_NULL(tbl.secure_display_heap)) {
		rc = PTR_ERR(tbl.secure_display_heap);
		CAM_ERR(CAM_MEM, "qcom,display heap not found, rc=%d",
			rc);
		tbl.secure_display_heap = NULL;
		goto put_heaps;
	}

	tbl.camera_heap = dma_heap_find("qcom,camera");
	if (IS_ERR_OR_NULL(tbl.camera_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%ld",
			PTR_ERR(tbl.camera_heap));
		tbl.camera_heap = NULL;
	}

	tbl.camera_uncached_heap = dma_heap_find("qcom,camera-uncached");
	if (IS_ERR_OR_NULL(tbl.camera_uncached_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera-uncached heap not found, err=%ld",
			PTR_ERR(tbl.camera_uncached_heap));
		tbl.camera_uncached_heap = NULL;
	}

	CAM_INFO(CAM_MEM,
		"Heaps : system=%pK, system_uncached=%pK, camera=%pK, camera-uncached=%pK, secure_display=%pK",
		tbl.system_heap, tbl.system_uncached_heap,
		tbl.camera_heap, tbl.camera_uncached_heap,
		tbl.secure_display_heap);

	return 0;
put_heaps:
	cam_mem_mgr_put_dma_heaps();
	return rc;
}

static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf,
	unsigned long *i_ino)
{
	int rc = 0;
	struct dma_heap *heap;
	struct dma_heap *try_heap = NULL;
	struct timespec64 ts1, ts2;
	long microsec = 0;
	bool use_cached_heap = false;
	struct mem_buf_lend_kernel_arg arg;
	int vmids[CAM_MAX_VMIDS];
	int perms[CAM_MAX_VMIDS];
	int num_vmids = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (g_cam_mem_mgr_debug.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_CACHE) ||
		(tbl.force_cache_allocs &&
		(!(cam_flags & CAM_MEM_FLAG_PROTECTED_MODE)))) {
		CAM_DBG(CAM_MEM,
			"Using CACHED heap, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		use_cached_heap = true;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		use_cached_heap = true;
		CAM_DBG(CAM_MEM,
			"Using CACHED heap for secure, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
	} else {
		use_cached_heap = false;
		CAM_ERR(CAM_MEM,
			"Using UNCACHED heap not supported, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		/*
		 * Need better handling based on whether dma-buf-heaps support
		 * uncached heaps or not. For now, assume not supported.
		 */
		return -EINVAL;
	}

	if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap = tbl.secure_display_heap;

		vmids[num_vmids] = VMID_CP_CAMERA;
		perms[num_vmids] = PERM_READ | PERM_WRITE;
		num_vmids++;

		if (cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT) {
			CAM_DBG(CAM_MEM, "Secure mode CDSP flags");

			vmids[num_vmids] = VMID_CP_CDSP;
			perms[num_vmids] = PERM_READ | PERM_WRITE;
			num_vmids++;
		}
	} else if (cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL) {
		heap = tbl.secure_display_heap;
		vmids[num_vmids] = VMID_CP_NON_PIXEL;
		perms[num_vmids] = PERM_READ | PERM_WRITE;
		num_vmids++;
	} else if (use_cached_heap) {
		try_heap = tbl.camera_heap;
		heap = tbl.system_heap;
	} else {
		try_heap = tbl.camera_uncached_heap;
		heap = tbl.system_uncached_heap;
	}

	CAM_DBG(CAM_MEM, "Using heaps : try=%pK, heap=%pK", try_heap, heap);

	*buf = NULL;

	if (!try_heap && !heap) {
		CAM_ERR(CAM_MEM,
			"No heap available for allocation, can't allocate");
		return -EINVAL;
	}

	if (try_heap) {
		*buf = dma_heap_buffer_alloc(try_heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			CAM_WARN(CAM_MEM,
				"Failed in allocating from try heap, heap=%pK, len=%zu, err=%ld",
				try_heap, len, PTR_ERR(*buf));
			*buf = NULL;
		}
	}

	if (*buf == NULL) {
		*buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			rc = PTR_ERR(*buf);
			CAM_ERR(CAM_MEM,
				"Failed in allocating from heap, heap=%pK, len=%zu, err=%d",
				heap, len, rc);
			*buf = NULL;
			return rc;
		}
	}

	*i_ino = file_inode((*buf)->file)->i_ino;

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) ||
		(cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL)) {
		if (num_vmids >= CAM_MAX_VMIDS) {
			CAM_ERR(CAM_MEM, "Insufficient array size for vmids %d", num_vmids);
			rc = -EINVAL;
			goto end;
		}

		arg.nr_acl_entries = num_vmids;
		arg.vmids = vmids;
		arg.perms = perms;

		rc = mem_buf_lend(*buf, &arg);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in buf lend rc=%d, buf=%pK, vmids [0]=0x%x, [1]=0x%x, [2]=0x%x",
				rc, *buf, vmids[0], vmids[1], vmids[2]);
			goto end;
		}
	}

	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK, i_ino=%lu", len, *buf, *i_ino);

	if (g_cam_mem_mgr_debug.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
end:
	dma_buf_put(*buf);
	return rc;
}

#else

static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf,
	unsigned long *i_ino)
{
	int rc = 0;
	unsigned int heap_id;
	int32_t ion_flag = 0;
	struct timespec64 ts1, ts2;
	long microsec = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (g_cam_mem_mgr_debug.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cam_flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	if (tbl.force_cache_allocs && (!(ion_flag & ION_FLAG_SECURE)))
		ion_flag |= ION_FLAG_CACHED;

	*buf = ion_alloc(len, heap_id, ion_flag);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	*i_ino = file_inode((*buf)->file)->i_ino;

	if (g_cam_mem_mgr_debug.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
}
#endif

static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
	struct dma_buf **dmabuf,
	int *fd,
	unsigned long *i_ino)
{
	int rc;
	struct dma_buf *temp_dmabuf = NULL;

	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf, i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Error allocating dma buf : len=%zu, flags=0x%x",
			len, flags);
		return rc;
	}

	*fd = dma_buf_fd(*dmabuf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d, i_ino=%lu",
		len, *dmabuf, *fd, *i_ino);

	/*
	 * Increment the refcount so that it becomes 2 here. When we close the
	 * fd, the refcount drops to 1, and when we then call dma_buf_put(),
	 * it drops to 0 and the memory is freed.
	 */
	temp_dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(temp_dmabuf)) {
		rc = PTR_ERR(temp_dmabuf);
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d, i_ino=%lu, rc=%d", *fd, *i_ino, rc);
		goto put_buf;
	}

	return rc;

put_buf:
	dma_buf_put(*dmabuf);
	return rc;
}

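/*
 * Refcount lifecycle sketch for the helper above (illustrative): after
 * allocation the dma-buf holds one reference, dma_buf_fd() hands that
 * reference to the new fd, and dma_buf_get() takes a second one for the
 * table, so the buffer survives a userspace close(fd):
 *
 *     alloc            -> refcount 1 (owned by the fd after dma_buf_fd)
 *     dma_buf_get(fd)  -> refcount 2 (kernel table reference)
 *     close(fd)        -> refcount 1
 *     dma_buf_put(buf) -> refcount 0, memory freed
 */
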
static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_EVA_NOPIXEL) &&
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE ||
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping and secure mode not allowed in no pixel mode");
		return -EINVAL;
	}

	return 0;
}

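/*
 * Example (illustrative): requesting kernel access on a secure buffer is
 * rejected, since the CPU must never map protected memory:
 *
 *     cmd->flags = CAM_MEM_FLAG_PROTECTED_MODE | CAM_MEM_FLAG_KMD_ACCESS;
 *     rc = cam_mem_util_check_alloc_flags(cmd);   // rc == -EINVAL
 */
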
static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	struct dma_buf *dmabuf,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
	bool is_internal)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);
	bool dis_delayed_unmap = false;

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP)
		dis_delayed_unmap = true;

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	for (i = 0; i < num_hdls; i++) {
		/*
		 * If 36-bit addressing is enabled, check for ICP cmd buffers
		 * and map them within the shared region.
		 */
		if (cam_smmu_is_expanded_memory() &&
			cam_smmu_supports_shared_region(mmu_hdls[i]) &&
			((flags & CAM_MEM_FLAG_CMD_BUF_TYPE) ||
			(flags & CAM_MEM_FLAG_HW_AND_CDM_OR_SHARED)))
			region = CAM_SMMU_REGION_SHARED;

		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, hw_vaddr, len);
		else
			rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dmabuf, dis_delayed_unmap, dir,
				hw_vaddr, len, region, is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed %s map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
				(flags & CAM_MEM_FLAG_PROTECTED_MODE) ? "secured" : "",
				i, fd, dir, mmu_hdls[i], rc);
			goto multi_map_fail;
		}
	}

	return rc;
multi_map_fail:
	for (--i; i >= 0; i--) {
		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dmabuf);
		else
			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, dmabuf, CAM_SMMU_REGION_IO);
	}

	return rc;
}

int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;
	unsigned long i_ino = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	len = cmd->len;

	if (tbl.need_shared_buffer_padding &&
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)) {
		len += CAM_MEM_SHARED_BUFFER_PAD_4K;
		CAM_DBG(CAM_MEM, "Pad 4k size, actual %llu, allocating %zu",
			cmd->len, len);
	}

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%zu, align=%llu, flags=0x%x, num_hdl=%d",
			len, cmd->align, cmd->flags, cmd->num_hdl);
		cam_mem_mgr_print_tbl();
		return rc;
	}
	if (!dmabuf) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc returned NULL dmabuf! fd=%d, i_ino=%lu, len=%zu", fd, i_ino, len);
		cam_mem_mgr_print_tbl();
		return -EINVAL;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_IO;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			dmabuf,
			&hw_vaddr,
			&len,
			region,
			true);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va len=%zu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				len, cmd->flags,
				fd, region, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len)
					rc = -EBADR;
				cam_mem_mgr_print_tbl();
			}
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	tbl.bufq[idx].is_internal = true;
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	if (cmd->flags & CAM_MEM_FLAG_KMD_DEBUG_FLAG)
		tbl.dbg_buf_idx = idx;

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	cam_mem_mgr_reset_presil_params(idx);
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}

static bool cam_mem_util_is_map_internal(int32_t fd, unsigned long i_ino)
{
	uint32_t i;
	bool is_internal = false;

	mutex_lock(&tbl.m_lock);
	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
		if ((tbl.bufq[i].fd == fd) && (tbl.bufq[i].i_ino == i_ino)) {
			is_internal = tbl.bufq[i].is_internal;
			break;
		}
	}
	mutex_unlock(&tbl.m_lock);

	return is_internal;
}

int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;
	bool is_internal = false;
	unsigned long i_ino;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	i_ino = file_inode(dmabuf->file)->i_ino;

	is_internal = cam_mem_util_is_map_internal(cmd->fd, i_ino);

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d, fd=%d",
			idx, cmd->fd);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			dmabuf,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
			is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, len=%zu, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, len,
				CAM_SMMU_REGION_IO, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len) {
					rc = -EBADR;
					cam_mem_mgr_print_tbl();
				}
			}
			goto map_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	tbl.bufq[idx].is_internal = is_internal;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;
	cmd->out.size = (uint32_t)len;
	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len, tbl.bufq[idx].i_ino);

	return rc;
map_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}

static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	struct dma_buf *dma_buf;
	unsigned long i_ino;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;
	dma_buf = tbl.bufq[idx].dma_buf;
	i_ino = tbl.bufq[idx].i_ino;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%x, i_ino=%lu flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, i_ino, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dma_buf);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, rc=%d",
					i, fd, i_ino, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, dma_buf, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, i_ino, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}

static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
			tbl.bufq[idx].kmdvaddr);
}

static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_MEM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].i_ino = 0;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		tbl.bufq[i].is_internal = false;
		cam_mem_mgr_reset_presil_params(i);
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}

void cam_mem_mgr_deinit(void)
{
	if (!atomic_read(&cam_mem_mgr_state))
		return;

	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	tbl.dbg_buf_idx = -1;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}

static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	/* Deactivate the buffer queue to prevent multiple unmap */
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].vaddr = 0;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		/*
		 * Workaround: the smmu driver does put_buf without get_buf for
		 * kernel mappings. Set NULL here so that we don't call
		 * dma_buf_put() again below.
		 */
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK, i_ino %lu",
		idx, tbl.bufq[idx].fd, tbl.bufq[idx].is_imported, tbl.bufq[idx].dma_buf,
		tbl.bufq[idx].i_ino);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = 0;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].is_internal = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	cam_mem_mgr_reset_presil_params(idx);
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}

int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle %d not matching within table %d, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}

int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	unsigned long i_ino = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, &buf, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * We always map the kernel virtual address here, so update the flags
	 * to make sure the unmap path tears it down properly.
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	CAM_DBG(CAM_MEM, "idx=%d, dmabuf=%pK, i_ino=%lu, flags=0x%x, mem_handle=0x%x",
		idx, buf, i_ino, inp->flags, mem_handle);

	return rc;
slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);

int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);

int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	uintptr_t kvaddr = 0;
	unsigned long i_ino = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if ((region != CAM_SMMU_REGION_SECHEAP) &&
		(region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, 0, &buf, &i_ino);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	if (inp->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
		if (rc) {
			CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
			goto kmap_fail;
		}
	}

	rc = cam_smmu_reserve_buf_region(region,
		inp->smmu_hdl, buf, &iova, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].i_ino = i_ino;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;
slot_fail:
	cam_smmu_release_buf_region(region, smmu_hdl);
smmu_fail:
	if (region == CAM_SMMU_REGION_FWUNCACHED)
		cam_mem_util_unmap_cpu_va(buf, kvaddr);
kmap_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);

int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if ((inp->region != CAM_SMMU_REGION_SECHEAP) &&
		(inp->region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM,
			"Only secondary heap and FW uncached regions are supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_buf_region(inp->region, inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
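
/*
 * Usage sketch (illustrative only): reserving and later freeing a
 * secondary-heap region could look roughly like this. my_iommu_hdl and
 * sec_heap_size are hypothetical placeholders.
 *
 *	struct cam_mem_mgr_request_desc req = {0};
 *	struct cam_mem_mgr_memory_desc sec_heap = {0};
 *	int rc;
 *
 *	req.size = sec_heap_size;
 *	req.smmu_hdl = my_iommu_hdl;
 *	req.flags = CAM_MEM_FLAG_KMD_ACCESS;
 *
 *	rc = cam_mem_mgr_reserve_memory_region(&req,
 *		CAM_SMMU_REGION_SECHEAP, &sec_heap);
 *	if (!rc)
 *		rc = cam_mem_mgr_free_memory_region(&sec_heap);
 */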

#ifdef CONFIG_CAM_PRESIL
struct dma_buf *cam_mem_mgr_get_dma_buf(int fd)
{
	struct dma_buf *dmabuf = NULL;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf for fd");
		return NULL;
	}

	CAM_INFO(CAM_PRESIL, "Received DMA Buf* %pK", dmabuf);

	return dmabuf;
}

int cam_presil_put_dmabuf_from_fd(uint64_t input_dmabuf)
{
	struct dma_buf *dmabuf = (struct dma_buf *)(uint64_t)input_dmabuf;
	int idx = 0;

	CAM_INFO(CAM_PRESIL, "Received dma_buf :%pK", dmabuf);

	if (!dmabuf) {
		CAM_ERR(CAM_PRESIL, "NULL dma_buf received, nothing to put");
		return -EINVAL;
	}

	for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) {
		if ((tbl.bufq[idx].dma_buf != NULL) && (tbl.bufq[idx].dma_buf == dmabuf)) {
			if (tbl.bufq[idx].presil_params.refcount)
				tbl.bufq[idx].presil_params.refcount--;
			else
				CAM_ERR(CAM_PRESIL, "Unbalanced dmabuf put: %pK", dmabuf);

			if (!tbl.bufq[idx].presil_params.refcount) {
				dma_buf_put(dmabuf);
				cam_mem_mgr_reset_presil_params(idx);
				CAM_DBG(CAM_PRESIL, "Done dma_buf_put for %pK", dmabuf);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(cam_presil_put_dmabuf_from_fd);

int cam_presil_get_fd_from_dmabuf(uint64_t input_dmabuf)
{
	int fd_for_dmabuf = -1;
	struct dma_buf *dmabuf = (struct dma_buf *)(uint64_t)input_dmabuf;
	int idx = 0;

	CAM_DBG(CAM_PRESIL, "Received dma_buf :%pK", dmabuf);

	if (!dmabuf) {
		CAM_ERR(CAM_PRESIL, "NULL dma_buf received, cannot get fd");
		return -EINVAL;
	}

	for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) {
		if ((tbl.bufq[idx].dma_buf != NULL) && (tbl.bufq[idx].dma_buf == dmabuf)) {
			CAM_DBG(CAM_PRESIL,
				"Found entry for request from Presil UMD Daemon at %d, dmabuf %pK fd_for_umd_daemon %d refcount: %d",
				idx, tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].presil_params.fd_for_umd_daemon,
				tbl.bufq[idx].presil_params.refcount);

			if (tbl.bufq[idx].presil_params.fd_for_umd_daemon < 0) {
				fd_for_dmabuf = dma_buf_fd(dmabuf, O_CLOEXEC);
				if (fd_for_dmabuf < 0) {
					CAM_ERR(CAM_PRESIL, "get fd fail, fd_for_dmabuf=%d",
						fd_for_dmabuf);
					return -EINVAL;
				}

				tbl.bufq[idx].presil_params.fd_for_umd_daemon = fd_for_dmabuf;
				CAM_INFO(CAM_PRESIL,
					"Received generated idx %d fd_for_dmabuf Buf* %lld", idx,
					fd_for_dmabuf);
			} else {
				fd_for_dmabuf = tbl.bufq[idx].presil_params.fd_for_umd_daemon;
				CAM_INFO(CAM_PRESIL,
					"Received existing at idx %d fd_for_dmabuf Buf* %lld", idx,
					fd_for_dmabuf);
			}

			tbl.bufq[idx].presil_params.refcount++;
		} else {
			CAM_DBG(CAM_MEM,
				"Not found dmabuf at idx=%d, dma_buf %pK handle 0x%0x active %d",
				idx, tbl.bufq[idx].dma_buf, tbl.bufq[idx].buf_handle,
				tbl.bufq[idx].active);
		}
	}

	return (int)fd_for_dmabuf;
}
EXPORT_SYMBOL(cam_presil_get_fd_from_dmabuf);
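
/*
 * Usage sketch (illustrative only): the two presil helpers above are
 * refcounted as a pair; every fd handed out via cam_presil_get_fd_from_dmabuf()
 * should eventually be balanced by a cam_presil_put_dmabuf_from_fd() on the
 * same dma_buf pointer, which drops the dma_buf reference once the refcount
 * reaches zero.
 *
 *	int fd = cam_presil_get_fd_from_dmabuf((uint64_t)dmabuf);
 *	if (fd >= 0) {
 *		/* ...hand fd to the presil UMD daemon... */
 *		cam_presil_put_dmabuf_from_fd((uint64_t)dmabuf);
 *	}
 */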

int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle)
{
	int rc = 0;

	/* Send the presil IO buffer to the PC side, starting at its IOVA address */
	uint64_t io_buf_addr;
	size_t io_buf_size;
	int i, j, fd = -1, idx = 0;
	uint8_t *iova_ptr = NULL;
	uint64_t dmabuf = 0;
	bool is_mapped_in_cb = false;

	CAM_DBG(CAM_PRESIL, "buf handle 0x%0x", buf_handle);

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	for (i = 0; i < tbl.bufq[idx].num_hdl; i++) {
		if (tbl.bufq[idx].hdls[i] == iommu_hdl)
			is_mapped_in_cb = true;
	}

	if (!is_mapped_in_cb) {
		/*
		 * The same underlying dma_buf may appear in other bufq entries
		 * (same inode); check whether any of those entries is mapped
		 * in the requested context bank.
		 */
		for (j = 0; j < CAM_MEM_BUFQ_MAX; j++) {
			if (tbl.bufq[j].i_ino == tbl.bufq[idx].i_ino) {
				for (i = 0; i < tbl.bufq[j].num_hdl; i++) {
					if (tbl.bufq[j].hdls[i] == iommu_hdl)
						is_mapped_in_cb = true;
				}
			}
		}

		if (!is_mapped_in_cb) {
			CAM_DBG(CAM_PRESIL,
				"Still could not find idx=%d, FD %d buf_handle 0x%0x",
				idx, GET_FD_FROM_HANDLE(buf_handle), buf_handle);
			/*
			 * Okay to return 0, since this function also gets called for buffers
			 * that are shared only between umd/kmd; these may not be mapped
			 * with smmu.
			 */
			return 0;
		}
	}

	if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active) &&
		(tbl.bufq[idx].buf_handle == buf_handle)) {
		CAM_DBG(CAM_PRESIL,
			"Found dmabuf in bufq idx %d, FD %d handle 0x%0x dmabuf %pK",
			idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].dma_buf);
		dmabuf = (uint64_t)tbl.bufq[idx].dma_buf;
		fd = tbl.bufq[idx].fd;
	} else {
		CAM_ERR(CAM_PRESIL,
			"Could not find dmabuf Invalid Mem idx=%d, FD %d handle 0x%0x active %d",
			idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].active);
		return -EINVAL;
	}

	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size, NULL);
	if (rc || !io_buf_addr) {
		CAM_DBG(CAM_PRESIL, "Invalid ioaddr : 0x%x, fd = %d, dmabuf = %pK",
			io_buf_addr, fd, dmabuf);
		return -EINVAL;
	}

	iova_ptr = (uint8_t *)io_buf_addr;
	CAM_INFO(CAM_PRESIL, "Sending buffer with ioaddr : 0x%x, fd = %d, dmabuf = %pK",
		io_buf_addr, fd, dmabuf);

	rc = cam_presil_send_buffer(dmabuf, 0, 0, (uint32_t)io_buf_size, (uint64_t)iova_ptr);

	return rc;
}

int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl)
{
	int idx = 0;
	int rc = 0;
	int32_t fd_already_sent[128];
	int fd_already_sent_count = 0;
	int fd_already_index = 0;
	int fd_already_sent_found = 0;

	memset(&fd_already_sent, 0x0, sizeof(fd_already_sent));

	for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) {
		if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active)) {
			CAM_DBG(CAM_PRESIL, "Sending %d, FD %d handle 0x%0x", idx, tbl.bufq[idx].fd,
				tbl.bufq[idx].buf_handle);
			fd_already_sent_found = 0;

			for (fd_already_index = 0; fd_already_index < fd_already_sent_count;
				fd_already_index++) {
				if (fd_already_sent[fd_already_index] == tbl.bufq[idx].fd) {
					fd_already_sent_found = 1;
					CAM_DBG(CAM_PRESIL,
						"fd_already_sent %d, FD %d handle 0x%0x flags=0x%0x",
						idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle,
						tbl.bufq[idx].flags);
				}
			}

			if (fd_already_sent_found)
				continue;

			CAM_DBG(CAM_PRESIL, "Sending %d, FD %d handle 0x%0x flags=0x%0x", idx,
				tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].flags);

			rc = cam_mem_mgr_send_buffer_to_presil(iommu_hdl, tbl.bufq[idx].buf_handle);

			/* Track the fd as sent; guard against overflowing the tracking array */
			if (fd_already_sent_count < ARRAY_SIZE(fd_already_sent))
				fd_already_sent[fd_already_sent_count++] = tbl.bufq[idx].fd;
		} else {
			CAM_DBG(CAM_PRESIL, "Invalid Mem idx=%d, FD %d handle 0x%0x active %d",
				idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle,
				tbl.bufq[idx].active);
		}
	}

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_send_all_buffers_to_presil);

int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle, uint32_t buf_size,
	uint32_t offset, int32_t iommu_hdl)
{
	int rc = 0;

	/* Receive output buffer from presil to the PC side, starting at its IOVA address */
	uint64_t io_buf_addr;
	size_t io_buf_size;
	uint64_t dmabuf = 0;
	int fd = 0;
	uint8_t *iova_ptr = NULL;
	int idx = 0;

	CAM_DBG(CAM_PRESIL, "buf handle 0x%0x", buf_handle);

	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size, NULL);
	if (rc) {
		CAM_ERR(CAM_PRESIL, "Unable to get IOVA for buffer buf_hdl: 0x%0x iommu_hdl: 0x%0x",
			buf_handle, iommu_hdl);
		return -EINVAL;
	}

	iova_ptr = (uint8_t *)io_buf_addr;
	iova_ptr += offset; /* adjust target address to start writing the buffer to */

	if (!buf_size) {
		buf_size = io_buf_size;
		CAM_DBG(CAM_PRESIL, "Updated buf_size from Zero to 0x%0x", buf_size);
	}

	fd = GET_FD_FROM_HANDLE(buf_handle);

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active) &&
		(tbl.bufq[idx].buf_handle == buf_handle)) {
		CAM_DBG(CAM_PRESIL, "Found dmabuf in bufq idx %d, FD %d handle 0x%0x dmabuf %pK",
			idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].dma_buf);
		dmabuf = (uint64_t)tbl.bufq[idx].dma_buf;
	} else {
		CAM_ERR(CAM_PRESIL,
			"Could not find dmabuf Invalid Mem idx=%d, FD %d handle 0x%0x active %d",
			idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].active);
	}

	CAM_DBG(CAM_PRESIL,
		"Retrieving buffer with ioaddr : 0x%x, offset = %d, size = %d, fd = %d, dmabuf = %pK",
		io_buf_addr, offset, buf_size, fd, dmabuf);

	/* Write into the offset-adjusted target address computed above */
	rc = cam_presil_retrieve_buffer(dmabuf, 0, 0, (uint32_t)buf_size, (uint64_t)iova_ptr);

	CAM_INFO(CAM_PRESIL,
		"Retrieved buffer with ioaddr : 0x%x, offset = %d, size = %d, fd = %d, dmabuf = %pK",
		io_buf_addr, offset, buf_size, fd, dmabuf);

	return rc;
}
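
/*
 * Usage sketch (illustrative only): a typical presil round trip mirrors all
 * active buffers to the PC side before a request, then pulls a single output
 * buffer back once PC-side processing completes. buf_handle and out_iommu_hdl
 * are hypothetical; a buf_size of 0 retrieves the full mapped length.
 *
 *	rc = cam_mem_mgr_send_all_buffers_to_presil(out_iommu_hdl);
 *	if (!rc)
 *		rc = cam_mem_mgr_retrieve_buffer_from_presil(buf_handle,
 *			0, 0, out_iommu_hdl);
 */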
#else /* ifdef CONFIG_CAM_PRESIL */
struct dma_buf *cam_mem_mgr_get_dma_buf(int fd)
{
	return NULL;
}

int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl)
{
	return 0;
}

int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle)
{
	return 0;
}

int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle,
	uint32_t buf_size,
	uint32_t offset,
	int32_t iommu_hdl)
{
	return 0;
}
#endif /* ifdef CONFIG_CAM_PRESIL */