/* cam_smmu_api.h */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef _CAM_SMMU_API_H_
  7. #define _CAM_SMMU_API_H_
  8. #include <linux/dma-direction.h>
  9. #include <linux/module.h>
  10. #include <linux/dma-buf.h>
  11. #include <linux/dma-direction.h>
  12. #include <linux/of_platform.h>
  13. #include <linux/iommu.h>
  14. #include <linux/random.h>
  15. #include <linux/spinlock_types.h>
  16. #include <linux/mutex.h>
  17. #include <linux/msm_ion.h>
  18. #define CAM_SMMU_GET_IOVA_DELTA(val1, val2) \
  19. ({ \
  20. (val1) > (val2) ? (val1) - (val2) : (val2) - (val1); \
  21. })
/* Enum for possible CAM SMMU operations, passed to cam_smmu_ops() */
enum cam_smmu_ops_param {
	CAM_SMMU_ATTACH,      /* Attach the client's context bank to the IOMMU */
	CAM_SMMU_DETACH,      /* Detach the client's context bank from the IOMMU */
	CAM_SMMU_VOTE,        /* NOTE(review): presumably votes for SMMU resources — confirm in driver */
	CAM_SMMU_DEVOTE,      /* NOTE(review): presumably removes a prior vote — confirm in driver */
	CAM_SMMU_OPS_INVALID  /* Sentinel for invalid/unset operation */
};
/*
 * Direction of a CAM SMMU mapping. Per the map-API docs below, these are
 * translated to DMA_TO_DEVICE, DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * (exact per-value mapping is done in the driver — see cam_smmu.c).
 */
enum cam_smmu_map_dir {
	CAM_SMMU_MAP_READ,    /* Device-read mapping */
	CAM_SMMU_MAP_WRITE,   /* Device-write mapping */
	CAM_SMMU_MAP_RW,      /* Read/write (bidirectional) mapping */
	CAM_SMMU_MAP_INVALID  /* Sentinel for invalid/unset direction */
};
/* Memory region identifiers within a context bank's IOVA space */
enum cam_smmu_region_id {
	CAM_SMMU_REGION_FIRMWARE,   /* Firmware region (see cam_smmu_alloc_firmware) */
	CAM_SMMU_REGION_SHARED,     /* Shared region (see cam_smmu_supports_shared_region) */
	CAM_SMMU_REGION_SCRATCH,    /* Scratch-buffer region (see cam_smmu_get_scratch_iova) */
	CAM_SMMU_REGION_IO,         /* General I/O mapping region */
	CAM_SMMU_REGION_SECHEAP,    /* Secondary heap region (see cam_smmu_reserve_buf_region) */
	CAM_SMMU_REGION_QDSS,       /* QDSS region (see cam_smmu_alloc_qdss) */
	CAM_SMMU_REGION_FWUNCACHED, /* Uncached firmware region */
};
/**
 * @brief : cam_smmu_pf_info
 *
 * Page fault information handed to a client's fault callback registered
 * via cam_smmu_set_client_page_fault_handler().
 *
 * @param domain        : IOMMU domain received in IOMMU page fault handler
 * @param dev           : Device received in IOMMU page fault handler
 * @param iova          : IOVA where page fault occurred
 * @param flags         : Flags received in IOMMU page fault handler
 * @param token         : Userdata given during callback registration
 * @param buf_info      : Closest mapped buffer info
 * @param bid           : Bus id
 * @param pid           : Unique id for hw group of ports
 * @param mid           : Port id of hw
 * @param is_secure     : Faulted memory in secure or non-secure region
 * @param in_map_region : Whether the faulted address falls inside a mapped region
 */
struct cam_smmu_pf_info {
	struct iommu_domain *domain;
	struct device *dev;
	unsigned long iova;
	int flags;
	void *token;
	uint32_t buf_info;
	uint32_t bid;
	uint32_t pid;
	uint32_t mid;
	bool is_secure;
	bool in_map_region;
};
/**
 * @brief : Structure to store region information
 *
 * @param iova_start         : Start address of region
 * @param iova_len           : Length of region
 * @param discard_iova_start : Start of IOVA sub-range that must not be used
 * @param discard_iova_len   : Length of the discard IOVA sub-range
 */
struct cam_smmu_region_info {
	dma_addr_t iova_start;
	size_t iova_len;
	dma_addr_t discard_iova_start;
	size_t discard_iova_len;
};
/**
 * @brief : Gets an smmu handle
 *
 * @param identifier: Unique identifier to be used by clients which they
 *                    should get from device tree. CAM SMMU driver will
 *                    not enforce how this string is obtained and will
 *                    only validate this against the list of permitted
 *                    identifiers
 * @param handle_ptr: Based on the identifier, CAM SMMU driver will
 *                    fill the handle pointed by handle_ptr
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_handle(char *identifier, int *handle_ptr);

/**
 * @brief : Performs IOMMU operations
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param op    : Operation to be performed. Can be either CAM_SMMU_ATTACH
 *                or CAM_SMMU_DETACH
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
/**
 * @brief : Maps user space IOVA for calling driver
 *
 * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param ion_fd : ION handle identifying the memory buffer.
 * @param dmabuf : DMA buf handle identifying the memory buffer.
 * @param dis_delayed_unmap : Whether to disable the Delayed Unmap feature
 *                            for this mapping
 * @param dir    : Mapping direction, which will translate to
 *                 DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @param dma_addr : Pointer to physical address where mapped address will be
 *                   returned if region_id is CAM_SMMU_REGION_IO. If region_id
 *                   is CAM_SMMU_REGION_SHARED, dma_addr is used as an input
 *                   parameter which specifies the cpu virtual address to map.
 * @param len_ptr  : Length of buffer mapped returned by CAM SMMU driver.
 * @param region_id : Memory region identifier
 * @param is_internal : Specifies if this buffer is kernel allocated.
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
	enum cam_smmu_region_id region_id, bool is_internal);

/**
 * @brief : Maps kernel space IOVA for calling driver
 *
 * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param buf    : dma_buf allocated for kernel usage in mem_mgr
 * @param dir    : Mapping direction, which will translate to
 *                 DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @param dma_addr : Pointer to physical address where mapped address will be
 *                   returned if region_id is CAM_SMMU_REGION_IO. If region_id
 *                   is CAM_SMMU_REGION_SHARED, dma_addr is used as an input
 *                   parameter which specifies the cpu virtual address to map.
 * @param len_ptr  : Length of buffer mapped returned by CAM SMMU driver.
 * @param region_id : Memory region identifier
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_kernel_iova(int handle,
	struct dma_buf *buf, enum cam_smmu_map_dir dir,
	dma_addr_t *dma_addr, size_t *len_ptr,
	enum cam_smmu_region_id region_id);
/**
 * @brief : Unmaps user space IOVA for calling driver
 *
 * @param handle   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param ion_fd   : ION handle identifying the memory buffer.
 * @param dma_buf  : DMA Buf handle identifying the memory buffer.
 * @param region_id: Region id from which to unmap buffer.
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_user_iova(int handle,
	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id);

/**
 * @brief : Unmaps kernel IOVA for calling driver
 *
 * @param handle   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param buf      : dma_buf allocated for the kernel
 * @param region_id: Region id from which to unmap buffer.
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_kernel_iova(int handle,
	struct dma_buf *buf, enum cam_smmu_region_id region_id);
/**
 * @brief : Allocates a scratch buffer
 *
 * This function allocates a scratch virtual buffer of length virt_len in the
 * device virtual address space mapped to phys_len physically contiguous bytes
 * in that device's SMMU.
 *
 * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
 * other, otherwise -EINVAL is returned.
 *
 * -EINVAL will be returned if virt_len is less than phys_len.
 *
 * Passing a too large phys_len might also cause failure if that much size is
 * not available for allocation in a physically contiguous way.
 *
 * @param handle   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param dir      : Direction of mapping which will translate to IOMMU_READ,
 *                   IOMMU_WRITE or a bit mask of both.
 * @param paddr_ptr: Device virtual address that the client device will be
 *                   able to read from/write to
 * @param virt_len : Virtual length of the scratch buffer
 * @param phys_len : Physical length of the scratch buffer
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_scratch_iova(int handle,
	enum cam_smmu_map_dir dir,
	dma_addr_t *paddr_ptr,
	size_t virt_len,
	size_t phys_len);

/**
 * @brief : Frees a scratch buffer
 *
 * This function frees a scratch buffer and releases the corresponding SMMU
 * mappings.
 *
 * @param handle : Handle to identify the CAM SMMU client (IFE, ICP, etc.)
 * @param paddr  : Device virtual address of client's scratch buffer that
 *                 will be freed.
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_put_scratch_iova(int handle,
	dma_addr_t paddr);
/**
 * @brief : Destroys an smmu handle
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_destroy_handle(int handle);

/**
 * @brief : Returns if context bank identified by handle has a shared region
 *
 * @param handle: Handle to identify the context bank
 * @return : True if context bank supports shared region, false otherwise
 * @note : Currently, only ICP context banks support shared regions.
 */
bool cam_smmu_supports_shared_region(int handle);
/**
 * @brief : Registers smmu fault handler for client
 *
 * @param handle    : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param handler_cb: Callback invoked when an IOMMU page fault occurs;
 *                    receives the fault details in a cam_smmu_pf_info
 * @param token     : Userdata passed back to the client in pf_info->token
 *                    when the fault handler is triggered
 */
void cam_smmu_set_client_page_fault_handler(int handle,
	void (*handler_cb)(struct cam_smmu_pf_info *pf_info), void *token);

/**
 * @brief : Unregisters smmu fault handler for client
 *
 * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
 * @param token : Userdata that was supplied at registration time; identifies
 *                which handler to remove
 */
void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
/**
 * @brief Maps memory from an ION fd into IOVA space
 *
 * @param handle   : SMMU handle identifying the context bank to map to
 * @param ion_fd   : ION fd of memory to map to
 * @param dma_buf  : DMA buf of memory to map to
 * @param paddr_ptr: Pointer to IOVA address that will be returned
 * @param len_ptr  : Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
	dma_addr_t *paddr_ptr, size_t *len_ptr);

/**
 * @brief Maps memory from an ION fd into IOVA space of a secure context bank
 *
 * @param handle   : SMMU handle identifying the secure context bank to map to
 * @param ion_fd   : ION fd of memory to map to
 * @param dma_buf  : DMA Buf of memory to map to
 * @param paddr_ptr: Pointer to IOVA address that will be returned
 * @param len_ptr  : Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
	dma_addr_t *paddr_ptr, size_t *len_ptr);

/**
 * @brief Unmaps memory from context bank
 *
 * @param handle : SMMU handle identifying the context bank
 * @param ion_fd : ION fd of memory to unmap
 * @param dma_buf: DMA Buf of memory to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
/**
 * @brief Maps secure memory for SMMU handle
 *
 * @param handle  : SMMU handle identifying secure context bank
 * @param ion_fd  : ION fd to map securely
 * @param dmabuf  : DMA buf to map securely
 * @param dir     : DMA Direction for the mapping
 * @param dma_addr: Returned IOVA address after mapping
 * @param len_ptr : Length of memory mapped
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr);

/**
 * @brief Unmaps secure memory for SMMU handle
 *
 * @param handle : SMMU handle identifying secure context bank
 * @param ion_fd : ION fd to unmap
 * @param dma_buf: DMA Buf to unmap
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
/**
 * @brief Allocates firmware for context bank
 *
 * @param smmu_hdl: SMMU handle identifying context bank
 * @param iova    : IOVA address of allocated firmware
 * @param kvaddr  : CPU mapped address of allocated firmware
 * @param len     : Length of allocated firmware memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_alloc_firmware(int32_t smmu_hdl,
	dma_addr_t *iova,
	uintptr_t *kvaddr,
	size_t *len);

/**
 * @brief Deallocates firmware memory for context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_dealloc_firmware(int32_t smmu_hdl);

/**
 * @brief Gets region information specified by smmu handle and region id
 *
 * @param smmu_hdl   : SMMU handle identifying the context bank
 * @param region_id  : Region id for which information is desired
 * @param region_info: Struct populated with region information
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_region_info(int32_t smmu_hdl,
	enum cam_smmu_region_id region_id,
	struct cam_smmu_region_info *region_info);
/**
 * @brief Reserves a region with buffer
 *
 * Note: parameters documented in signature order (buf before iova).
 *
 * @param region     : Region id
 * @param smmu_hdl   : SMMU handle identifying the context bank
 * @param buf        : Allocated dma_buf for secondary heap
 * @param iova       : IOVA of secondary heap after reservation has completed
 * @param request_len: Length of secondary heap after reservation has completed
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region,
	int32_t smmu_hdl, struct dma_buf *buf,
	dma_addr_t *iova, size_t *request_len);

/**
 * @brief Releases buffer in reserved region
 *
 * @param region  : Region id
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
	int32_t smmu_hdl);
/**
 * @brief Allocates qdss for context bank
 *
 * @param smmu_hdl: SMMU handle identifying context bank
 * @param iova    : IOVA address of allocated qdss
 * @param len     : Length of allocated qdss memory
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_alloc_qdss(int32_t smmu_hdl,
	dma_addr_t *iova,
	size_t *len);

/**
 * @brief Deallocates qdss memory for context bank
 *
 * @param smmu_hdl: SMMU handle identifying the context bank
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_dealloc_qdss(int32_t smmu_hdl);

/**
 * @brief Get start addr & len of I/O region for a given cb
 *
 * @param smmu_hdl          : SMMU handle identifying the context bank
 * @param iova              : IOVA address of allocated I/O region
 * @param len               : Length of allocated I/O memory
 * @param discard_iova_start: Start address of io space to discard
 * @param discard_iova_len  : Length of io space to discard
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_get_io_region_info(int32_t smmu_hdl,
	dma_addr_t *iova, size_t *len,
	dma_addr_t *discard_iova_start, size_t *discard_iova_len);
/**
 * @brief : API to reset the call context bank page fault count
 *          This should be done on the starting of new camera open
 * @return void.
 */
void cam_smmu_reset_cb_page_fault_cnt(void);

/**
 * @brief : API to register SMMU hw to platform framework.
 *
 * (Original doc claimed a struct platform_device pointer return, but the
 * function returns int.)
 * @return 0 on success, negative error code otherwise.
 */
int cam_smmu_init_module(void);

/**
 * @brief : API to remove SMMU Hw from platform framework.
 */
void cam_smmu_exit_module(void);

/**
 * @brief : API to determine whether to force all allocations to CACHED
 *
 * @param force_alloc_cached: Set to the queried value on success
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached);

/**
 * @brief : API to determine whether padding is needed for shared buffers
 */
bool cam_smmu_need_shared_buffer_padding(void);

/**
 * @brief : API to determine whether certain HW is 36-bit memory addressable
 */
bool cam_smmu_is_expanded_memory(void);

/**
 * @brief : API to query whether page fault non fatal is enabled for a
 *          device's context bank
 *
 * @param smmu_hdl    : SMMU handle identifying the context bank
 * @param non_fatal_en: Set to the queried value on success
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
int cam_smmu_is_cb_non_fatal_fault_en(int smmu_hdl, bool *non_fatal_en);
  432. #endif /* _CAM_SMMU_API_H_ */