cam_compat.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/of_address.h>

#include "cam_compat.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "camera_main.h"

#if KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
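/*
 * Reserve the ICP firmware carveout described by the "memory-region"
 * phandle of the firmware device node and map it write-combined so the
 * firmware image can be copied into it.
 */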
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}

	rc = of_address_to_resource(mem_node, 0, &res);
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}

	icp_fw->fw_hdl = res.start;
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);

end:
	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	iounmap(icp_fw->fw_kva);
}
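
/*
 * Notify the secure world, via an SCM call, that the IFE safe LUT is being
 * switched. An SCM failure is reported as an error only on the CPAS Titan
 * 170 v1.0/v1.1 and 175 v1.0 targets that require the notification.
 */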
int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}
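
/*
 * Ask the hypervisor, via an SCM call, to protect or unprotect the CSIPHY
 * lanes described by the CPAS CP register mask of the given PHY instance.
 */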
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;

	if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
		csiphy_dev->csiphy_cpas_cp_reg_mask[offset])) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
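
/*
 * CPAS errata workaround: read-modify-write a secure register through SCM,
 * OR-ing in the value from the errata descriptor.
 */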
void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	unsigned int reg_val;

	qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
	reg_val |= errata_wa->data.reg_info.value;
	qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, const void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}
#else
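/*
 * Pre-5.4 kernels: the ICP firmware buffer is allocated with
 * dma_alloc_coherent() and secure-world notifications go through the legacy
 * scm_call2()/scm_io_* interface instead of the qcom_scm_* helpers.
 */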
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;

	icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
		&icp_fw->fw_hdl, GFP_KERNEL);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
		rc = -ENOMEM;
	}

	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
		icp_fw->fw_hdl);
}

int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version;
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = smmu_se_ife,
		.args[1] = safe_trigger,
	};

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}

int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
	};

	if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else {
		/* Index the CP register mask only after validating offset */
		description.args[1] =
			csiphy_dev->csiphy_cpas_cp_reg_mask[offset];
		if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
			CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
			rc = -EINVAL;
		}
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	reg_val = scm_io_read(errata_wa->data.reg_info.offset);
	reg_val |= errata_wa->data.reg_info.value;
	scm_io_write(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}
#endif

/* Callback to compare device from match list before adding as component */
static inline int camera_component_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}

/* Add component matches to list for master of aggregate driver */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}
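
	/*
	 * Walk each registered camera component driver and add every platform
	 * device that matches it on the platform bus to the component match
	 * list. bus_find_device() returns a reference on each matched device;
	 * the reference from the previous iteration is dropped with
	 * put_device() before continuing.
	 */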
	for (i = 0; i < ARRAY_SIZE(cam_component_drivers); i++) {
#if KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
		struct device_driver const *drv =
			&cam_component_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		struct device *start_dev = NULL, *match_dev;

		while ((match_dev = bus_find_device(&platform_bus_type,
			start_dev, drv_ptr, &camera_platform_compare_dev))) {
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s",
				pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

end:
	return rc;
}