// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <soc/qcom/rpmh.h>

#include "cam_compat.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "camera_main.h"
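
/*
 * The helpers in this file give the camera drivers a single call surface
 * while hiding differences between kernel versions and build configurations.
 * The first block wraps the RPMH DRV channel APIs when
 * CONFIG_USE_RPMH_DRV_API is enabled; otherwise the same entry points are
 * built as no-op stubs so callers need no #ifdefs of their own.
 */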
#if IS_ENABLED(CONFIG_USE_RPMH_DRV_API)

#define CAM_RSC_DRV_IDENTIFIER "cam_rsc"

const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	const struct device *rsc_dev;

	rsc_dev = rpmh_get_device(CAM_RSC_DRV_IDENTIFIER, index);
	if (!rsc_dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for index: %u", index);
		return NULL;
	}

	return rsc_dev;
}

int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV enable");
		return -EINVAL;
	}

	rc = rpmh_drv_start(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV start", dev_name(dev));
		return rc;
	}

	return rc;
}

int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV disable");
		return -EINVAL;
	}

	rc = rpmh_drv_stop(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV stop", dev_name(dev));
		return rc;
	}

	return rc;
}

int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV channel switch");
		return -EINVAL;
	}

	rc = rpmh_write_sleep_and_wake_no_child(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV channel switch", dev_name(dev));
		return rc;
	}

	return rc;
}
#else
const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	return NULL;
}

int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	return 0;
}
#endif
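
/*
 * ICP firmware reservation and SCM helpers. On kernels >= 5.4 the firmware
 * region comes from the "memory-region" DT phandle and is mapped with
 * ioremap_wc(), and secure-world calls go through the qcom_scm_* API; on
 * older kernels the firmware buffer is allocated with dma_alloc_coherent()
 * and the same notifications are issued through raw scm_call2() descriptors.
 */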
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}

	rc = of_address_to_resource(mem_node, 0, &res);
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}

	icp_fw->fw_hdl = res.start;
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);

end:
	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	iounmap(icp_fw->fw_kva);
}
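
/*
 * Notify the secure world of a safe/non-safe LUT transition for the IFE SMMU
 * stream. A failed SCM call is treated as fatal only on the Titan versions
 * listed in the switch below; other targets ignore the failure.
 */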
int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}

int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
			csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
	reg_val |= errata_wa->data.reg_info.value;
	qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, const void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, const void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
#else
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;

	icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
		&icp_fw->fw_hdl, GFP_KERNEL);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
		rc = -ENOMEM;
	}

	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
		icp_fw->fw_hdl);
}

int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = smmu_se_ife,
		.args[1] = safe_trigger,
	};

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}

int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
	};

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else {
		/* Only index csiphy_info[] once the offset is known to be valid */
		description.args[1] =
			csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask;
		if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
			CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
			rc = -EINVAL;
		}
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	reg_val = scm_io_read(errata_wa->data.reg_info.offset);
	reg_val |= errata_wa->data.reg_info.value;
	scm_io_write(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
#endif
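
/*
 * Zeroize-and-free wrapper: newer kernels provide kfree_sensitive() in place
 * of the old kzfree(), so the version check below selects whichever symbol
 * this kernel exports.
 */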
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
void cam_free_clear(const void *ptr)
{
	kfree_sensitive(ptr);
}
#else
void cam_free_clear(const void *ptr)
{
	kzfree(ptr);
}
#endif

/* Callback to compare device from match list before adding as component */
static inline int camera_component_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
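
/*
 * A minimal usage sketch for the matcher below (assumed caller, not taken
 * from this file): the aggregate/master driver builds its match list here
 * and then registers with the component framework, roughly as follows.
 *
 *	struct component_match *match_list = NULL;
 *	int rc;
 *
 *	rc = camera_component_match_add_drivers(&pdev->dev, &match_list);
 *	if (!rc && match_list)
 *		rc = component_master_add_with_match(&pdev->dev,
 *			&camera_master_ops, match_list);
 *
 * "camera_master_ops" is a placeholder for the caller's
 * struct component_master_ops.
 */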

/* Add component matches to list for master of aggregate driver */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;
	struct i2c_client *client = NULL;
	struct device *start_dev = NULL, *match_dev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_platform_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_platform_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&platform_bus_type,
			start_dev, drv_ptr, &camera_platform_compare_dev))) {
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_i2c_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_i2c_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&i2c_bus_type,
			start_dev, drv_ptr, &camera_i2c_compare_dev))) {
			put_device(start_dev);
			client = to_i2c_client(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

end:
	return rc;
}
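
/*
 * Report the SMMU fault ids (bid/pid/mid) for a faulting domain. Kernels
 * >= 5.10 expose them through qcom_iommu_get_fault_ids(); older kernels use
 * iommu_get_fault_ids(). Either way the ids are logged and copied into the
 * caller's cam_smmu_pf_info.
 */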
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#include <linux/qcom-iommu-util.h>
void cam_check_iommu_faults(struct iommu_domain *domain,
	struct cam_smmu_pf_info *pf_info)
{
	struct qcom_iommu_fault_ids fault_ids = {0, 0, 0};

	if (qcom_iommu_get_fault_ids(domain, &fault_ids))
		CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids");
	else
		CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
			fault_ids.bid, fault_ids.pid, fault_ids.mid);

	pf_info->bid = fault_ids.bid;
	pf_info->pid = fault_ids.pid;
	pf_info->mid = fault_ids.mid;
}
#else
void cam_check_iommu_faults(struct iommu_domain *domain,
	struct cam_smmu_pf_info *pf_info)
{
	struct iommu_fault_ids fault_ids = {0, 0, 0};

	if (iommu_get_fault_ids(domain, &fault_ids))
		CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids");
	else
		CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
			fault_ids.bid, fault_ids.pid, fault_ids.mid);

	pf_info->bid = fault_ids.bid;
	pf_info->pid = fault_ids.pid;
	pf_info->mid = fault_ids.mid;
}
#endif
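
/* Order two subdevs by close_seq_prior; used by the ordered-list comparator below. */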
static inline int cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2)
{
	if (entry_1->close_seq_prior > entry_2->close_seq_prior)
		return 1;
	else if (entry_1->close_seq_prior < entry_2->close_seq_prior)
		return -1;
	else
		return 0;
}
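
/*
 * Interface differences handled by the 5.15 split below: the ordered-list
 * comparator takes const list heads, dma_buf_vmap()/dma_buf_vunmap() operate
 * on a struct dma_buf_map instead of a raw pointer, the I3C remove callback
 * returns void, and the best-fit IOVA helpers are unavailable, which turns
 * cam_smmu_util_iommu_custom() into a no-op.
 */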
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
	return;
}

int cam_req_mgr_ordered_list_cmp(void *priv,
	const struct list_head *head_1, const struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}
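
/*
 * dma-buf kernel-virtual-address helpers. A minimal pairing sketch (assumed
 * caller, not from this file):
 *
 *	uintptr_t kva;
 *
 *	if (!cam_compat_util_get_dmabuf_va(dmabuf, &kva)) {
 *		...access the buffer through kva...
 *		cam_compat_util_put_dmabuf_va(dmabuf, (void *)kva);
 *	}
 */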
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	struct dma_buf_map mapping;
	int error_code = dma_buf_vmap(dmabuf, &mapping);

	if (error_code) {
		*vaddr = 0;
	} else {
		*vaddr = (mapping.is_iomem) ?
			(uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
		CAM_DBG(CAM_MEM,
			"dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
			dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
	}

	return error_code;
}

void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	struct dma_buf_map mapping = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &mapping);
}

void cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));
}
#else
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
	iommu_dma_enable_best_fit_algo(dev);

	if (discard_start)
		iommu_dma_reserve_iova(dev, discard_start, discard_length);

	return;
}

int cam_req_mgr_ordered_list_cmp(void *priv,
	struct list_head *head_1, struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}

int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	int error_code = 0;
	void *addr = dma_buf_vmap(dmabuf);

	if (!addr) {
		*vaddr = 0;
		error_code = -ENOSPC;
	} else {
		*vaddr = (uintptr_t)addr;
	}

	return error_code;
}

void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	dma_buf_vunmap(dmabuf, vaddr);
}

int cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));

	return 0;
}
#endif
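
/*
 * Forward dma_buf_set_name() on kernels >= 5.15; older kernels treat the
 * call as a successful no-op.
 */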
#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	long ret = 0;

	ret = dma_buf_set_name(dmabuf, name);

	return ret;
}
#else
long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	return 0;
}
#endif