cam_compat.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/of_address.h>

#include "cam_compat.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "camera_main.h"

#if KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
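
/*
 * On 5.4+ kernels the ICP firmware region is a static carveout described by
 * the "memory-region" phandle of the firmware device node: resolve it, map
 * it write-combined and clear it so the firmware image can be copied in.
 */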
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}
	rc = of_address_to_resource(mem_node, 0, &res);
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}
	icp_fw->fw_hdl = res.start;
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);

end:
	return rc;
}
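
/*
 * Undo cam_reserve_icp_fw(): only the ioremap is torn down; the carveout
 * itself stays reserved.
 */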
void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	iounmap(icp_fw->fw_kva);
}
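
/*
 * Notify the secure world of a safe/non-safe LUT switch for the IFE SMMU
 * instance. A failed SCM call is treated as fatal only on the Titan 170/175
 * versions listed below; other targets ignore it.
 */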
int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}
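
/*
 * Ask the hypervisor to (un)protect the CSIPHY lanes described by the
 * per-instance CP register mask; 'offset' selects the instance and is bounds
 * checked against CSIPHY_MAX_INSTANCES_PER_PHY before the SCM call is made.
 */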
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
			csiphy_dev->csiphy_info[offset]
				.csiphy_cpas_cp_reg_mask)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
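
/*
 * Read-modify-write of a secure register through SCM, used to apply the CPAS
 * errata workaround described by @errata_wa.
 */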
void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	unsigned int reg_val;

	qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
	reg_val |= errata_wa->data.reg_info.value;
	qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
}
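
/* bus_find_device() match helper: 'data' is the platform driver to match against. */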
static int camera_platform_compare_dev(struct device *dev, const void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

#else
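
/*
 * Pre-5.4 kernels: the same functionality through the older interfaces,
 * dma_alloc_coherent() for the firmware buffer and the legacy scm_call2()/
 * scm_io_*() API instead of the qcom_scm_*() helpers.
 */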
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;

	icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
		&icp_fw->fw_hdl, GFP_KERNEL);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
		rc = -ENOMEM;
	}

	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
		icp_fw->fw_hdl);
}
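
/* Safe LUT notification through the legacy scm_call2() descriptor interface
 * (service 0x15, command 0x3).
 */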
int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = smmu_se_ife,
		.args[1] = safe_trigger,
	};

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}
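
/*
 * Lane protection through the legacy scm_call2() descriptor interface
 * (service 0x18, command 0x7). The per-instance register mask is copied into
 * the descriptor only after 'offset' has been validated.
 */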
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
	};

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else {
		description.args[1] = csiphy_dev->csiphy_info[offset]
			.csiphy_cpas_cp_reg_mask;
		if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
			CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
			rc = -EINVAL;
		}
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	uint32_t reg_val;

	reg_val = scm_io_read(errata_wa->data.reg_info.offset);
	reg_val |= errata_wa->data.reg_info.value;
	scm_io_write(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}
#endif

/* Callback to compare device from match list before adding as component */
static inline int camera_component_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}

/* Add component matches to list for master of aggregate driver */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_drivers); i++) {
#if KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
		struct device_driver const *drv =
			&cam_component_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		struct device *start_dev = NULL, *match_dev;

		while ((match_dev = bus_find_device(&platform_bus_type,
				start_dev, drv_ptr, &camera_platform_compare_dev))) {
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s",
				pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

end:
	return rc;
}