cam_mem_mgr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
#include <linux/ion_kernel.h>
#endif

#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"

static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

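/* Translate CAM_MEM_FLAG_* buffer flags into a DMA data direction. */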
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}

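/*
 * Map a dma-buf into the kernel address space page by page via
 * dma_buf_kmap() and return the virtual address of the first page
 * together with the total buffer length.
 */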
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uintptr_t *vaddr,
	size_t *len)
{
	int i, j, rc;
	void *addr;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called in pair to avoid stability issue.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	/*
	 * Code could be simplified if ION support of dma_buf_vmap is
	 * available. This workaround takes advantage of the fact that
	 * ion_alloc returns a virtually contiguous memory region, so we
	 * just need to _kmap each individual page and then only use the
	 * virtual address returned from the first call to _kmap.
	 */
	for (i = 0; i < PAGE_ALIGN(dmabuf->size) / PAGE_SIZE; i++) {
		addr = dma_buf_kmap(dmabuf, i);
		if (IS_ERR_OR_NULL(addr)) {
			CAM_ERR(CAM_MEM, "kernel map fail");
			for (j = 0; j < i; j++)
				dma_buf_kunmap(dmabuf,
					j,
					(void *)(*vaddr + (j * PAGE_SIZE)));
			*vaddr = 0;
			*len = 0;
			rc = -ENOSPC;
			goto fail;
		}
		if (i == 0)
			*vaddr = (uintptr_t)addr;
	}

	*len = dmabuf->size;

	return 0;

fail:
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}

static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int i, rc = 0, page_num;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;

	for (i = 0; i < page_num; i++) {
		dma_buf_kunmap(dmabuf, i,
			(void *)(vaddr + (i * PAGE_SIZE)));
	}

	/*
	 * dma_buf_begin_cpu_access() and
	 * dma_buf_end_cpu_access() need to be called in pair
	 * to avoid stability issue.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}

	return rc;
}

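/*
 * Initialize the buffer table: allocate the slot bitmap, reserve slot 0
 * (handle index 0 is treated as invalid) and mark the manager initialized.
 */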
int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap)
		return -ENOMEM;

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}
	mutex_init(&tbl.m_lock);

	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);

	return 0;
}

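/* Reserve the first free slot in the buffer table and mark it active. */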
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}

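/*
 * Look up the device (IOVA) address and length of a previously mapped
 * buffer for the given SMMU handle, using the buffer handle as the key.
 */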
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active)
		return -EAGAIN;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
			buf_handle, mmu_handle, tbl.bufq[idx].fd);
		goto handle_mismatch;
	}

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d iova_ptr:%pK len_ptr:%llu",
		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);

handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);

int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EPERM;

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);

int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		rc = -EINVAL;
		goto end;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}

	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);

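/* Allocate an ION buffer and return the backing dma-buf. */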
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct dma_buf **buf)
{
	int rc = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	*buf = ion_alloc(len, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	return rc;
}

static int cam_mem_util_get_dma_buf_fd(size_t len,
	size_t align,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct dma_buf **buf,
	int *fd)
{
	struct dma_buf *dmabuf = NULL;
	int rc = 0;

	if (!buf || !fd) {
		CAM_ERR(CAM_MEM, "Invalid params, buf=%pK, fd=%pK", buf, fd);
		return -EINVAL;
	}

	*buf = ion_alloc(len, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	*fd = dma_buf_fd(*buf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto get_fd_fail;
	}

	/*
	 * Increment the ref count so that it becomes 2 here. When we
	 * close the fd, the refcount drops to 1, and when we later call
	 * dma_buf_put(), it drops to 0 and the memory is freed.
	 */
	dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d", *fd);
		rc = -EINVAL;
	}

	return rc;

get_fd_fail:
	dma_buf_put(*buf);
	return rc;
}

static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
	struct dma_buf **dmabuf,
	int *fd)
{
	uint32_t heap_id;
	uint32_t ion_flag = 0;
	int rc;

	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cmd->flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cmd->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
		cmd->align,
		heap_id,
		ion_flag,
		dmabuf,
		fd);

	return rc;
}

static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}

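/*
 * Map the buffer into every requested SMMU context (secure or non-secure).
 * On failure the mappings created so far are rolled back.
 */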
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
				hw_vaddr,
				len);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], rc);
				goto multi_map_fail;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_user_iova(mmu_hdls[i],
				fd,
				dir,
				(dma_addr_t *)hw_vaddr,
				len,
				region);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], region, rc);
				goto multi_map_fail;
			}
		}
	}

	return rc;

multi_map_fail:
	/* Roll back every mapping created before the failure, including idx 0 */
	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		for (--i; i >= 0; i--)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
	else
		for (--i; i >= 0; i--)
			cam_smmu_unmap_user_iova(mmu_hdls[i],
				fd,
				CAM_SMMU_REGION_IO);
	return rc;
}

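/*
 * Handle the userspace alloc-and-map request: allocate an ION buffer,
 * map it into the requested SMMU contexts (and optionally into the kernel),
 * and record it in the buffer table.
 */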
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}
	len = cmd->len;

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_ion_alloc(cmd,
		&dmabuf,
		&fd);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
			cmd->len, cmd->align, cmd->flags, cmd->num_hdl);
		return rc;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_SECHEAP;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			&hw_vaddr,
			&len,
			region);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, fd, region, cmd->num_hdl, rc);
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = cmd->len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}

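/*
 * Handle the userspace map request: import an existing dma-buf fd,
 * map it into the requested SMMU contexts and record it in the table.
 */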
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, CAM_SMMU_REGION_IO,
				cmd->num_hdl, rc);
			goto map_fail;
		}
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto map_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_fail:
	dma_buf_put(dmabuf);
	return rc;
}

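/* Tear down the SMMU mappings recorded for the buffer at the given slot. */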
static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
					i, fd, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}

static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
}

static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_MEM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}

void cam_mem_mgr_deinit(void)
{
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}

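/*
 * Release a single buffer table entry: undo the kernel and SMMU mappings,
 * drop the dma-buf reference and return the slot to the free bitmap.
 */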
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	tbl.bufq[idx].vaddr = 0;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
		idx, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}

int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle %d not matching within table %d, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}

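/*
 * Kernel-internal allocation path: allocate an ION buffer, map it into the
 * kernel and into the given SMMU handle, and return the descriptors through
 * the output structure.
 */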
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uint32_t heap_id;
	int32_t ion_flag = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	if (inp->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);

	rc = cam_mem_util_get_dma_buf(inp->size,
		heap_id,
		ion_flag,
		&buf);

	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * we are mapping kva always here,
	 * update flags so that we do unmap properly
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);

int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);

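/*
 * Reserve the secondary-heap (SECHEAP) region: allocate a backing ION
 * buffer, hand it to the SMMU driver and record it in the buffer table.
 */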
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	uint32_t heap_id;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if (region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);
	rc = cam_mem_util_get_dma_buf(inp->size,
		heap_id,
		0,
		&buf);

	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
		buf,
		&iova,
		&request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = 0;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = 0;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_release_sec_heap(smmu_hdl);
smmu_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);

int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_MEM, "Only secondary heap supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);