cam_compat.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/of_address.h>
  7. #include <linux/slab.h>
  8. #include "cam_compat.h"
  9. #include "cam_debug_util.h"
  10. #include "cam_cpas_api.h"
  11. #include "camera_main.h"
  12. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
  13. int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  14. {
  15. int rc = 0;
  16. struct device_node *of_node;
  17. struct device_node *mem_node;
  18. struct resource res;
  19. of_node = (icp_fw->fw_dev)->of_node;
  20. mem_node = of_parse_phandle(of_node, "memory-region", 0);
  21. if (!mem_node) {
  22. rc = -ENOMEM;
  23. CAM_ERR(CAM_SMMU, "FW memory carveout not found");
  24. goto end;
  25. }
  26. rc = of_address_to_resource(mem_node, 0, &res);
  27. of_node_put(mem_node);
  28. if (rc < 0) {
  29. CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
  30. goto end;
  31. }
  32. icp_fw->fw_hdl = res.start;
  33. icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
  34. if (!icp_fw->fw_kva) {
  35. CAM_ERR(CAM_SMMU, "Failed to map the FW.");
  36. rc = -ENOMEM;
  37. goto end;
  38. }
  39. memset_io(icp_fw->fw_kva, 0, fw_length);
  40. end:
  41. return rc;
  42. }
  43. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  44. {
  45. iounmap(icp_fw->fw_kva);
  46. }
  47. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  48. {
  49. const uint32_t smmu_se_ife = 0;
  50. uint32_t camera_hw_version, rc = 0;
  51. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  52. if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
  53. switch (camera_hw_version) {
  54. case CAM_CPAS_TITAN_170_V100:
  55. case CAM_CPAS_TITAN_170_V110:
  56. case CAM_CPAS_TITAN_175_V100:
  57. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  58. rc = -EINVAL;
  59. break;
  60. default:
  61. break;
  62. }
  63. }
  64. return rc;
  65. }
  66. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  67. bool protect, int32_t offset)
  68. {
  69. int rc = 0;
  70. if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
  71. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  72. rc = -EINVAL;
  73. } else if (qcom_scm_camera_protect_phy_lanes(protect,
  74. csiphy_dev->csiphy_info[offset]
  75. .csiphy_cpas_cp_reg_mask)) {
  76. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  77. rc = -EINVAL;
  78. }
  79. return rc;
  80. }
  81. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  82. {
  83. int reg_val;
  84. qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
  85. reg_val |= errata_wa->data.reg_info.value;
  86. qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
  87. }
  88. static int camera_platform_compare_dev(struct device *dev, const void *data)
  89. {
  90. return platform_bus_type.match(dev, (struct device_driver *) data);
  91. }
  92. static int camera_i2c_compare_dev(struct device *dev, const void *data)
  93. {
  94. return i2c_bus_type.match(dev, (struct device_driver *) data);
  95. }
  96. #else
  97. int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  98. {
  99. int rc = 0;
  100. icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
  101. &icp_fw->fw_hdl, GFP_KERNEL);
  102. if (!icp_fw->fw_kva) {
  103. CAM_ERR(CAM_SMMU, "FW memory alloc failed");
  104. rc = -ENOMEM;
  105. }
  106. return rc;
  107. }
  108. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  109. {
  110. dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
  111. icp_fw->fw_hdl);
  112. }
  113. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  114. {
  115. const uint32_t smmu_se_ife = 0;
  116. uint32_t camera_hw_version, rc = 0;
  117. struct scm_desc description = {
  118. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  119. .args[0] = smmu_se_ife,
  120. .args[1] = safe_trigger,
  121. };
  122. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  123. if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
  124. switch (camera_hw_version) {
  125. case CAM_CPAS_TITAN_170_V100:
  126. case CAM_CPAS_TITAN_170_V110:
  127. case CAM_CPAS_TITAN_175_V100:
  128. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  129. rc = -EINVAL;
  130. break;
  131. default:
  132. break;
  133. }
  134. }
  135. return rc;
  136. }
  137. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  138. bool protect, int32_t offset)
  139. {
  140. int rc = 0;
  141. struct scm_desc description = {
  142. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  143. .args[0] = protect,
  144. .args[1] = csiphy_dev->csiphy_info[offset]
  145. .csiphy_cpas_cp_reg_mask,
  146. };
  147. if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
  148. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  149. rc = -EINVAL;
  150. } else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
  151. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  152. rc = -EINVAL;
  153. }
  154. return rc;
  155. }
  156. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  157. {
  158. int reg_val;
  159. reg_val = scm_io_read(errata_wa->data.reg_info.offset);
  160. reg_val |= errata_wa->data.reg_info.value;
  161. scm_io_write(errata_wa->data.reg_info.offset, reg_val);
  162. }
  163. static int camera_platform_compare_dev(struct device *dev, void *data)
  164. {
  165. return platform_bus_type.match(dev, (struct device_driver *) data);
  166. }
  167. static int camera_i2c_compare_dev(struct device *dev, void *data)
  168. {
  169. return i2c_bus_type.match(dev, (struct device_driver *) data);
  170. }
  171. #endif
  172. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
  173. void cam_free_clear(const void * ptr)
  174. {
  175. kfree_sensitive(ptr);
  176. }
  177. #else
  178. void cam_free_clear(const void * ptr)
  179. {
  180. kzfree(ptr);
  181. }
  182. #endif
  183. /* Callback to compare device from match list before adding as component */
  184. static inline int camera_component_compare_dev(struct device *dev, void *data)
  185. {
  186. return dev == data;
  187. }
/*
 * Add component matches to list for master of aggregate driver.
 *
 * Walks every device currently bound to each driver in the
 * cam_component_platform_drivers[] and cam_component_i2c_drivers[]
 * tables (declared elsewhere, presumably camera_main.h — confirm) and
 * registers each one as a component match on @master_dev.
 *
 * Returns 0 on success, -EINVAL on NULL arguments.
 */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;
	struct i2c_client *client = NULL;
	struct device *start_dev = NULL, *match_dev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		/* 5.4+ bus_find_device() takes a const void * match token */
		struct device_driver const *drv =
			&cam_component_platform_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_platform_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		/*
		 * bus_find_device() returns the next matching device with a
		 * reference held; the previous iteration's reference is
		 * dropped via put_device() (a no-op for the initial NULL).
		 */
		while ((match_dev = bus_find_device(&platform_bus_type,
			start_dev, drv_ptr, &camera_platform_compare_dev))) {
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		/* Drop the reference on the last device found (if any). */
		put_device(start_dev);
	}

	/* Same walk over the I2C bus for the camera I2C drivers. */
	for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_i2c_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_i2c_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&i2c_bus_type,
			start_dev, drv_ptr, &camera_i2c_compare_dev))) {
			put_device(start_dev);
			client = to_i2c_client(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

end:
	return rc;
}
  246. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
  247. #include <linux/qcom-iommu-util.h>
  248. void cam_check_iommu_faults(struct iommu_domain *domain,
  249. struct cam_smmu_pf_info *pf_info)
  250. {
  251. struct qcom_iommu_fault_ids fault_ids = {0, 0, 0};
  252. if (qcom_iommu_get_fault_ids(domain, &fault_ids))
  253. CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids");
  254. else
  255. CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
  256. fault_ids.bid, fault_ids.pid, fault_ids.mid);
  257. pf_info->bid = fault_ids.bid;
  258. pf_info->pid = fault_ids.pid;
  259. pf_info->mid = fault_ids.mid;
  260. }
  261. #else
  262. void cam_check_iommu_faults(struct iommu_domain *domain,
  263. struct cam_smmu_pf_info *pf_info)
  264. {
  265. struct iommu_fault_ids fault_ids = {0, 0, 0};
  266. if (iommu_get_fault_ids(domain, &fault_ids))
  267. CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids");
  268. CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
  269. fault_ids.bid, fault_ids.pid, fault_ids.mid);
  270. pf_info->bid = fault_ids.bid;
  271. pf_info->pid = fault_ids.pid;
  272. pf_info->mid = fault_ids.mid;
  273. }
  274. #endif
  275. static int inline cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2)
  276. {
  277. if (entry_1->close_seq_prior > entry_2->close_seq_prior)
  278. return 1;
  279. else if (entry_1->close_seq_prior < entry_2->close_seq_prior)
  280. return -1;
  281. else
  282. return 0;
  283. }
  284. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
  285. void cam_smmu_util_iommu_custom(struct device *dev,
  286. dma_addr_t discard_start, size_t discard_length)
  287. {
  288. return;
  289. }
  290. int cam_req_mgr_ordered_list_cmp(void *priv,
  291. const struct list_head *head_1, const struct list_head *head_2)
  292. {
  293. return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
  294. list_entry(head_2, struct cam_subdev, list));
  295. }
  296. int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
  297. {
  298. struct dma_buf_map mapping;
  299. int error_code = dma_buf_vmap(dmabuf, &mapping);
  300. if (error_code)
  301. *vaddr = 0;
  302. else
  303. *vaddr = (mapping.is_iomem) ?
  304. (uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
  305. return error_code;
  306. }
  307. int cam_get_ddr_type(void)
  308. {
  309. /* We assume all chipsets running kernel version 5.15+
  310. * to be using only DDR5 based memory.
  311. */
  312. return DDR_TYPE_LPDDR5;
  313. }
  314. #else
  315. void cam_smmu_util_iommu_custom(struct device *dev,
  316. dma_addr_t discard_start, size_t discard_length)
  317. {
  318. iommu_dma_enable_best_fit_algo(dev);
  319. if (discard_start)
  320. iommu_dma_reserve_iova(dev, discard_start, discard_length);
  321. return;
  322. }
  323. int cam_req_mgr_ordered_list_cmp(void *priv,
  324. struct list_head *head_1, struct list_head *head_2)
  325. {
  326. return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
  327. list_entry(head_2, struct cam_subdev, list));
  328. }
  329. int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
  330. {
  331. int error_code = 0;
  332. void *addr = dma_buf_vmap(dmabuf);
  333. if (!addr) {
  334. *vaddr = 0;
  335. error_code = -ENOSPC;
  336. } else {
  337. *vaddr = (uintptr_t)addr;
  338. }
  339. return error_code;
  340. }
  341. int cam_get_ddr_type(void)
  342. {
  343. return of_fdt_get_ddrtype();
  344. }
  345. #endif