// cam_compat.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/of_address.h>
  7. #include "cam_compat.h"
  8. #include "cam_debug_util.h"
  9. #include "cam_cpas_api.h"
  10. #if KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
  11. int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  12. {
  13. int rc = 0;
  14. struct device_node *of_node;
  15. struct device_node *mem_node;
  16. struct resource res;
  17. of_node = (icp_fw->fw_dev)->of_node;
  18. mem_node = of_parse_phandle(of_node, "memory-region", 0);
  19. if (!mem_node) {
  20. rc = -ENOMEM;
  21. CAM_ERR(CAM_SMMU, "FW memory carveout not found");
  22. goto end;
  23. }
  24. rc = of_address_to_resource(mem_node, 0, &res);
  25. of_node_put(mem_node);
  26. if (rc < 0) {
  27. CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
  28. goto end;
  29. }
  30. icp_fw->fw_hdl = res.start;
  31. icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
  32. if (!icp_fw->fw_kva) {
  33. CAM_ERR(CAM_SMMU, "Failed to map the FW.");
  34. rc = -ENOMEM;
  35. goto end;
  36. }
  37. memset_io(icp_fw->fw_kva, 0, fw_length);
  38. end:
  39. return rc;
  40. }
  41. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  42. {
  43. iounmap(icp_fw->fw_kva);
  44. }
  45. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  46. {
  47. const uint32_t smmu_se_ife = 0;
  48. uint32_t camera_hw_version, rc = 0;
  49. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  50. if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
  51. switch (camera_hw_version) {
  52. case CAM_CPAS_TITAN_170_V100:
  53. case CAM_CPAS_TITAN_170_V110:
  54. case CAM_CPAS_TITAN_175_V100:
  55. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  56. rc = -EINVAL;
  57. break;
  58. default:
  59. break;
  60. }
  61. }
  62. return rc;
  63. }
  64. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  65. bool protect, int32_t offset)
  66. {
  67. int rc = 0;
  68. if (offset >= CSIPHY_MAX_INSTANCES) {
  69. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  70. rc = -EINVAL;
  71. } else if (qcom_scm_camera_protect_phy_lanes(protect,
  72. csiphy_dev->csiphy_cpas_cp_reg_mask[offset])) {
  73. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  74. rc = -EINVAL;
  75. }
  76. return 0;
  77. }
  78. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  79. {
  80. int reg_val;
  81. qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
  82. reg_val |= errata_wa->data.reg_info.value;
  83. qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
  84. }
  85. #else
  86. int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  87. {
  88. int rc = 0;
  89. icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
  90. &icp_fw->fw_hdl, GFP_KERNEL);
  91. if (!icp_fw->fw_kva) {
  92. CAM_ERR(CAM_SMMU, "FW memory alloc failed");
  93. rc = -ENOMEM;
  94. }
  95. return rc;
  96. }
  97. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  98. {
  99. dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
  100. icp_fw->fw_hdl);
  101. }
  102. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  103. {
  104. const uint32_t smmu_se_ife = 0;
  105. uint32_t camera_hw_version, rc = 0;
  106. struct scm_desc description = {
  107. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  108. .args[0] = smmu_se_ife,
  109. .args[1] = safe_trigger,
  110. };
  111. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  112. if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
  113. switch (camera_hw_version) {
  114. case CAM_CPAS_TITAN_170_V100:
  115. case CAM_CPAS_TITAN_170_V110:
  116. case CAM_CPAS_TITAN_175_V100:
  117. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  118. rc = -EINVAL;
  119. break;
  120. default:
  121. break;
  122. }
  123. }
  124. return rc;
  125. }
  126. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  127. bool protect, int32_t offset)
  128. {
  129. int rc = 0;
  130. struct scm_desc description = {
  131. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  132. .args[0] = protect,
  133. .args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset],
  134. };
  135. if (offset >= CSIPHY_MAX_INSTANCES) {
  136. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  137. rc = -EINVAL;
  138. } else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
  139. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  140. rc = -EINVAL;
  141. }
  142. return 0;
  143. }
  144. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  145. {
  146. int reg_val;
  147. reg_val = scm_io_read(errata_wa->data.reg_info.offset);
  148. reg_val |= errata_wa->data.reg_info.value;
  149. scm_io_write(errata_wa->data.reg_info.offset, reg_val);
  150. }
  151. #endif