/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _CAM_SMMU_API_H_
#define _CAM_SMMU_API_H_

#include <linux/dma-direction.h>
#include <linux/module.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/iommu.h>
#include <linux/random.h>
#include <linux/spinlock_types.h>
#include <linux/mutex.h>
#include <linux/msm_ion.h>

/* Enum for possible CAM SMMU operations */
enum cam_smmu_ops_param {
        CAM_SMMU_ATTACH,
        CAM_SMMU_DETACH,
        CAM_SMMU_VOTE,
        CAM_SMMU_DEVOTE,
        CAM_SMMU_OPS_INVALID
};

/* Enum for possible CAM SMMU mapping directions */
enum cam_smmu_map_dir {
        CAM_SMMU_MAP_READ,
        CAM_SMMU_MAP_WRITE,
        CAM_SMMU_MAP_RW,
        CAM_SMMU_MAP_INVALID
};

/* Enum for CAM SMMU memory region types */
enum cam_smmu_region_id {
        CAM_SMMU_REGION_FIRMWARE,
        CAM_SMMU_REGION_SHARED,
        CAM_SMMU_REGION_SCRATCH,
        CAM_SMMU_REGION_IO,
        CAM_SMMU_REGION_SECHEAP,
        CAM_SMMU_REGION_QDSS,
        CAM_SMMU_REGION_FWUNCACHED,
};

/**
 * @brief : Page fault information passed to the client page fault handler
 *
 * @param domain : IOMMU domain received in the IOMMU page fault handler
 * @param dev : Device received in the IOMMU page fault handler
 * @param iova : IOVA where the page fault occurred
 * @param flags : Flags received in the IOMMU page fault handler
 * @param token : Userdata given during callback registration
 * @param buf_info : Closest mapped buffer info
 * @param bid : Bus id
 * @param pid : Unique id for the HW group of ports
 * @param mid : Port id of the HW
 */
struct cam_smmu_pf_info {
        struct iommu_domain *domain;
        struct device *dev;
        unsigned long iova;
        int flags;
        void *token;
        uint32_t buf_info;
        uint32_t bid;
        uint32_t pid;
        uint32_t mid;
};

/**
 * @brief : Structure to store region information
 *
 * @param iova_start : Start address of the region
 * @param iova_len : Length of the region
 * @param discard_iova_start : Start of the IOVA range within the region that
 *                             must not be used
 * @param discard_iova_len : Length of the discard IOVA range
 */
struct cam_smmu_region_info {
        dma_addr_t iova_start;
        size_t iova_len;
        dma_addr_t discard_iova_start;
        size_t discard_iova_len;
};

/**
 * @brief : Gets an SMMU handle
 *
 * @param identifier: Unique identifier to be used by clients, which they
 *                    should get from the device tree. The CAM SMMU driver
 *                    does not enforce how this string is obtained and only
 *                    validates it against the list of permitted identifiers.
 * @param handle_ptr: Based on the identifier, the CAM SMMU driver will
 *                    fill the handle pointed to by handle_ptr.
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_handle(char *identifier, int *handle_ptr);

/**
 * @brief : Performs IOMMU operations
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
 *             or CAM_SMMU_DETACH
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);

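/*
 * Illustrative sketch (not part of the original header): a client would
 * typically acquire its handle and attach its context bank before making any
 * mapping calls. The identifier string "ife" below is a placeholder; real
 * identifiers are defined in the device tree.
 *
 *      int hdl, rc;
 *
 *      rc = cam_smmu_get_handle("ife", &hdl);
 *      if (rc < 0)
 *              return rc;
 *
 *      rc = cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
 *      if (rc < 0) {
 *              cam_smmu_destroy_handle(hdl);
 *              return rc;
 *      }
 */
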
/**
 * @brief : Maps user space IOVA for the calling driver
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param ion_fd: ION handle identifying the memory buffer.
 * @param dis_delayed_unmap: Whether to disable the delayed unmap feature
 *                           for this mapping
 * @param dir : Mapping direction, which will translate to DMA_BIDIRECTIONAL,
 *              DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @param dma_addr : Pointer to the physical address where the mapped address
 *                   will be returned if region_id is CAM_SMMU_REGION_IO. If
 *                   region_id is CAM_SMMU_REGION_SHARED, dma_addr is used as
 *                   an input parameter which specifies the CPU virtual
 *                   address to map.
 * @param len_ptr : Length of the mapped buffer, returned by the CAM SMMU
 *                  driver.
 * @param region_id : Memory region identifier
 * @param is_internal: Specifies if this buffer is kernel allocated.
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
        enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
        enum cam_smmu_region_id region_id, bool is_internal);

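/*
 * Illustrative sketch (not part of the original header): mapping a buffer
 * received from user space into the IO region and unmapping it again. The
 * fd value "ion_fd" is assumed to come from the client's ioctl path.
 *
 *      dma_addr_t iova;
 *      size_t len;
 *      int rc;
 *
 *      rc = cam_smmu_map_user_iova(hdl, ion_fd, false, CAM_SMMU_MAP_RW,
 *              &iova, &len, CAM_SMMU_REGION_IO, false);
 *      ...
 *      rc = cam_smmu_unmap_user_iova(hdl, ion_fd, CAM_SMMU_REGION_IO);
 */
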
/**
 * @brief : Maps kernel space IOVA for the calling driver
 *
 * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param buf : dma_buf allocated for kernel usage in mem_mgr
 * @param dir : Mapping direction, which will translate to DMA_BIDIRECTIONAL,
 *              DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @param dma_addr : Pointer to the physical address where the mapped address
 *                   will be returned if region_id is CAM_SMMU_REGION_IO. If
 *                   region_id is CAM_SMMU_REGION_SHARED, dma_addr is used as
 *                   an input parameter which specifies the CPU virtual
 *                   address to map.
 * @param len_ptr : Length of the mapped buffer, returned by the CAM SMMU
 *                  driver.
 * @param region_id : Memory region identifier
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_kernel_iova(int handle,
        struct dma_buf *buf, enum cam_smmu_map_dir dir,
        dma_addr_t *dma_addr, size_t *len_ptr,
        enum cam_smmu_region_id region_id);

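/*
 * Illustrative sketch (not part of the original header): mapping a kernel
 * allocated dma_buf into the IO region and unmapping it when done. The
 * dma_buf pointer "kbuf" is assumed to have been allocated elsewhere by the
 * memory manager.
 *
 *      dma_addr_t iova;
 *      size_t len;
 *      int rc;
 *
 *      rc = cam_smmu_map_kernel_iova(hdl, kbuf, CAM_SMMU_MAP_RW,
 *              &iova, &len, CAM_SMMU_REGION_IO);
 *      ...
 *      rc = cam_smmu_unmap_kernel_iova(hdl, kbuf, CAM_SMMU_REGION_IO);
 */
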
/**
 * @brief : Unmaps user space IOVA for the calling driver
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param ion_fd: ION handle identifying the memory buffer.
 * @param region_id: Region from which the buffer is to be unmapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_user_iova(int handle,
        int ion_fd, enum cam_smmu_region_id region_id);

/**
 * @brief : Unmaps kernel IOVA for the calling driver
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param buf : dma_buf allocated for the kernel
 * @param region_id: Region from which the buffer is to be unmapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_kernel_iova(int handle,
        struct dma_buf *buf, enum cam_smmu_region_id region_id);

/**
 * @brief : Allocates a scratch buffer
 *
 * This function allocates a scratch virtual buffer of length virt_len in the
 * device virtual address space, mapped to phys_len physically contiguous
 * bytes in that device's SMMU.
 *
 * virt_len and phys_len are expected to be aligned to PAGE_SIZE and to each
 * other, otherwise -EINVAL is returned.
 *
 * -EINVAL is also returned if virt_len is less than phys_len.
 *
 * Passing a too large phys_len might also cause failure if that much memory
 * is not available as a physically contiguous allocation.
 *
 * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param dir : Direction of mapping, which will translate to IOMMU_READ,
 *              IOMMU_WRITE or a bit mask of both.
 * @param paddr_ptr: Device virtual address that the client device will be
 *                   able to read from/write to
 * @param virt_len : Virtual length of the scratch buffer
 * @param phys_len : Physical length of the scratch buffer
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_scratch_iova(int handle,
        enum cam_smmu_map_dir dir,
        dma_addr_t *paddr_ptr,
        size_t virt_len,
        size_t phys_len);

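/*
 * Illustrative sketch (not part of the original header): requesting a
 * scratch mapping where 4 physically contiguous pages back a 16-page device
 * virtual range. Both lengths are PAGE_SIZE aligned and virt_len is not
 * smaller than phys_len, as required above.
 *
 *      dma_addr_t scratch_iova;
 *      int rc;
 *
 *      rc = cam_smmu_get_scratch_iova(hdl, CAM_SMMU_MAP_RW, &scratch_iova,
 *              16 * PAGE_SIZE, 4 * PAGE_SIZE);
 *      ...
 *      rc = cam_smmu_put_scratch_iova(hdl, scratch_iova);
 */
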
/**
 * @brief : Frees a scratch buffer
 *
 * This function frees a scratch buffer and releases the corresponding SMMU
 * mappings.
 *
 * @param handle : Handle to identify the CAM SMMU client (IFE, ICP, etc.)
 * @param paddr : Device virtual address of the client's scratch buffer that
 *                will be freed.
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_put_scratch_iova(int handle,
        dma_addr_t paddr);

/**
 * @brief : Destroys an SMMU handle
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_destroy_handle(int handle);

/**
 * @brief : Finds the index by handle in the SMMU client table
 *
 * @param hdl: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @return Index of the SMMU client. Negative in case of error.
 */
int cam_smmu_find_index_by_handle(int hdl);

/**
 * @brief : Registers an SMMU fault handler for a client
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param handler_cb: Callback invoked when an IOMMU page fault occurs
 * @param token: Client data passed back to handler_cb on a page fault
 */
void cam_smmu_set_client_page_fault_handler(int handle,
        void (*handler_cb)(struct cam_smmu_pf_info *pf_info), void *token);

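/*
 * Illustrative sketch (not part of the original header): registering a page
 * fault callback. The callback and the "struct my_client" type are
 * hypothetical; the callback receives the cam_smmu_pf_info declared above.
 *
 *      static void my_pf_handler(struct cam_smmu_pf_info *pf_info)
 *      {
 *              struct my_client *client = pf_info->token;
 *
 *              dev_err(client->dev, "page fault at iova 0x%lx\n",
 *                      pf_info->iova);
 *      }
 *
 *      cam_smmu_set_client_page_fault_handler(hdl, my_pf_handler, client);
 *      ...
 *      cam_smmu_unset_client_page_fault_handler(hdl, client);
 */
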
/**
 * @brief : Unregisters the SMMU fault handler for a client
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param token: Client data that was passed during handler registration
 */
void cam_smmu_unset_client_page_fault_handler(int handle, void *token);

/**
 * @brief Maps memory from an ION fd into IOVA space
 *
 * @param handle: SMMU handle identifying the context bank to map to
 * @param ion_fd: ION fd of the memory to map
 * @param paddr_ptr: Pointer to the IOVA address that will be returned
 * @param len_ptr: Length of the mapped memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_iova(int handle, int ion_fd,
        dma_addr_t *paddr_ptr, size_t *len_ptr);

/**
 * @brief Maps memory from an ION fd into IOVA space
 *
 * @param handle: SMMU handle identifying the secure context bank to map to
 * @param ion_fd: ION fd of the memory to map
 * @param paddr_ptr: Pointer to the IOVA address that will be returned
 * @param len_ptr: Length of the mapped memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_stage2_iova(int handle, int ion_fd,
        dma_addr_t *paddr_ptr, size_t *len_ptr);

/**
 * @brief Unmaps memory from the context bank
 *
 * @param handle: SMMU handle identifying the context bank
 * @param ion_fd: ION fd of the memory to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_put_iova(int handle, int ion_fd);

/**
 * @brief Maps secure memory for the SMMU handle
 *
 * @param handle: SMMU handle identifying the secure context bank
 * @param ion_fd: ION fd to map securely
 * @param dir: DMA direction for the mapping
 * @param dma_addr: Returned IOVA address after mapping
 * @param len_ptr: Length of the mapped memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_stage2_iova(int handle,
        int ion_fd, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr,
        size_t *len_ptr);

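/*
 * Illustrative sketch (not part of the original header): mapping a secure
 * buffer on a handle that refers to a secure context bank, then releasing
 * it. The fd value "sec_fd" is assumed to come from the client's secure
 * buffer path.
 *
 *      dma_addr_t iova;
 *      size_t len;
 *      int rc;
 *
 *      rc = cam_smmu_map_stage2_iova(sec_hdl, sec_fd, CAM_SMMU_MAP_RW,
 *              &iova, &len);
 *      ...
 *      rc = cam_smmu_unmap_stage2_iova(sec_hdl, sec_fd);
 */
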
/**
 * @brief Unmaps secure memory for the SMMU handle
 *
 * @param handle: SMMU handle identifying the secure context bank
 * @param ion_fd: ION fd to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);

/**
 * @brief Allocates firmware memory for the context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 * @param iova: IOVA address of the allocated firmware memory
 * @param kvaddr: CPU mapped address of the allocated firmware memory
 * @param len: Length of the allocated firmware memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_alloc_firmware(int32_t smmu_hdl,
        dma_addr_t *iova,
        uintptr_t *kvaddr,
        size_t *len);

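/*
 * Illustrative sketch (not part of the original header): reserving the
 * firmware region before loading a firmware image into it, and releasing it
 * afterwards. Copying the image to fw_kva is left out.
 *
 *      dma_addr_t fw_iova;
 *      uintptr_t fw_kva;
 *      size_t fw_len;
 *      int rc;
 *
 *      rc = cam_smmu_alloc_firmware(hdl, &fw_iova, &fw_kva, &fw_len);
 *      ...
 *      rc = cam_smmu_dealloc_firmware(hdl);
 */
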
/**
 * @brief Deallocates firmware memory for the context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_dealloc_firmware(int32_t smmu_hdl);

/**
 * @brief Gets region information for the given SMMU handle and region id
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 * @param region_id: Region id for which information is desired
 * @param region_info: Struct populated with the region information
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_region_info(int32_t smmu_hdl,
        enum cam_smmu_region_id region_id,
        struct cam_smmu_region_info *region_info);

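/*
 * Illustrative sketch (not part of the original header): querying the bounds
 * of the shared region before carving allocations out of it.
 *
 *      struct cam_smmu_region_info shared_info;
 *      int rc;
 *
 *      rc = cam_smmu_get_region_info(hdl, CAM_SMMU_REGION_SHARED,
 *              &shared_info);
 *      if (!rc)
 *              pr_debug("shared region: iova 0x%llx len %zu\n",
 *                      (u64)shared_info.iova_start, shared_info.iova_len);
 */
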
/**
 * @brief Reserves a region with a buffer
 *
 * @param region: Region id
 * @param smmu_hdl: SMMU handle identifying the context bank
 * @param buf: Allocated dma_buf for the secondary heap
 * @param iova: IOVA of the secondary heap after the reservation has completed
 * @param request_len: Length of the secondary heap after the reservation has
 *                     completed
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region,
        int32_t smmu_hdl, struct dma_buf *buf,
        dma_addr_t *iova, size_t *request_len);

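/*
 * Illustrative sketch (not part of the original header): backing the
 * secondary heap region with a dma_buf and releasing the reservation later.
 * The dma_buf pointer "heap_buf" is assumed to have been allocated by the
 * caller.
 *
 *      dma_addr_t heap_iova;
 *      size_t heap_len;
 *      int rc;
 *
 *      rc = cam_smmu_reserve_buf_region(CAM_SMMU_REGION_SECHEAP, hdl,
 *              heap_buf, &heap_iova, &heap_len);
 *      ...
 *      rc = cam_smmu_release_buf_region(CAM_SMMU_REGION_SECHEAP, hdl);
 */
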
/**
 * @brief Releases the buffer in the reserved region
 *
 * @param region: Region id
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
        int32_t smmu_hdl);

/**
 * @brief Allocates QDSS memory for the context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 * @param iova: IOVA address of the allocated QDSS memory
 * @param len: Length of the allocated QDSS memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_alloc_qdss(int32_t smmu_hdl,
        dma_addr_t *iova,
        size_t *len);

/**
 * @brief Deallocates QDSS memory for the context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_dealloc_qdss(int32_t smmu_hdl);

/**
 * @brief Gets the start address and length of the I/O region for a given
 *        context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 * @param iova: IOVA address of the allocated I/O region
 * @param len: Length of the allocated I/O memory
 * @param discard_iova_start: Start address of the I/O space to discard
 * @param discard_iova_len: Length of the I/O space to discard
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_io_region_info(int32_t smmu_hdl,
        dma_addr_t *iova, size_t *len,
        dma_addr_t *discard_iova_start, size_t *discard_iova_len);

/**
 * @brief : API to reset the context bank page fault count.
 *          This should be done at the start of a new camera open.
 * @return void.
 */
void cam_smmu_reset_cb_page_fault_cnt(void);

/**
 * @brief : API to register the SMMU HW with the platform framework.
 * @return Zero on success, or a negative error code on failure.
 */
int cam_smmu_init_module(void);

/**
 * @brief : API to remove the SMMU HW from the platform framework.
 */
void cam_smmu_exit_module(void);

/**
 * @brief : API to determine whether to force all allocations to CACHED
 *
 * @param force_alloc_cached: Set to true if allocations must be forced to
 *                            CACHED
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached);

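/*
 * Illustrative sketch (not part of the original header): an allocator might
 * consult these two helpers before allocating and laying out shared buffers.
 * The padding amount shown is arbitrary.
 *
 *      bool force_cached = false;
 *      size_t pad = 0;
 *      int rc;
 *
 *      rc = cam_smmu_need_force_alloc_cached(&force_cached);
 *      if (!rc && cam_smmu_need_shared_buffer_padding())
 *              pad = PAGE_SIZE;
 */
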
/**
 * @brief : API to determine whether padding is needed for shared buffers
 *
 * @return True if padding is needed, false otherwise.
 */
bool cam_smmu_need_shared_buffer_padding(void);

#endif /* _CAM_SMMU_API_H_ */