cam_cre_dev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/kernel.h>

#include "cam_node.h"
#include "cam_hw_mgr_intf.h"
#include "cam_cre_hw_mgr.h"
#include "cam_cre_dev.h"
#include "cam_debug_util.h"
#include "cam_smmu_api.h"
#include "camera_main.h"
#include "cam_context_utils.h"

#define CAM_CRE_DEV_NAME "cam-cre"

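/*
 * Per-device state for the CRE subdev: the v4l2 subdev wrapper, the
 * cam_node exposed to userspace, the base and CRE-specific context
 * arrays, and the lock/open count used to track userspace opens.
 */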
struct cam_cre_subdev {
	struct cam_subdev sd;
	struct cam_node *node;
	struct cam_context ctx[CRE_CTX_MAX];
	struct cam_cre_context ctx_cre[CRE_CTX_MAX];
	struct mutex cre_lock;
	int32_t open_cnt;
	int32_t reserved;
};

static struct cam_cre_subdev g_cre_dev;

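/*
 * IOMMU page-fault callback registered with the SMMU layer. Walks the
 * node's context list to find the context (and packet) that owns the
 * faulting address; if no owner is found, the fault is reported to
 * userspace as a generic page-fault event.
 */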
static void cam_cre_dev_iommu_fault_handler(
	struct cam_smmu_pf_info *pf_smmu_info)
{
	int i, rc;
	struct cam_node *node = NULL;
	struct cam_hw_dump_pf_args pf_args = {0};

	if (!pf_smmu_info || !pf_smmu_info->token) {
		CAM_ERR(CAM_CRE, "invalid token in page handler cb");
		return;
	}

	node = (struct cam_node *)pf_smmu_info->token;
	pf_args.pf_smmu_info = pf_smmu_info;

	for (i = 0; i < node->ctx_size; i++) {
		cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args);
		if (pf_args.pf_context_info.ctx_found)
			/* found ctx and packet of the faulted address */
			break;
	}

	if (i == node->ctx_size) {
		/* Faulted ctx not found. Report PF to userspace */
		rc = cam_context_send_pf_evt(NULL, &pf_args);
		if (rc)
			CAM_ERR(CAM_CRE,
				"Failed to notify PF event to userspace rc: %d", rc);
	}
}

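/* Track userspace opens of the CRE subdev under cre_lock. */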
static int cam_cre_subdev_open(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	mutex_lock(&g_cre_dev.cre_lock);
	g_cre_dev.open_cnt++;
	mutex_unlock(&g_cre_dev.cre_lock);

	return 0;
}

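/*
 * Drop one open reference and, on the last close, shut the node down so
 * any remaining contexts are released. Returns -EINVAL if the subdev is
 * already closed or the node pointer is missing.
 */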
static int cam_cre_subdev_close_internal(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	int rc = 0;
	struct cam_node *node = v4l2_get_subdevdata(sd);

	mutex_lock(&g_cre_dev.cre_lock);
	if (g_cre_dev.open_cnt <= 0) {
		CAM_DBG(CAM_CRE, "CRE subdev is already closed");
		rc = -EINVAL;
		goto end;
	}
	g_cre_dev.open_cnt--;

	if (!node) {
		CAM_ERR(CAM_CRE, "Node ptr is NULL");
		rc = -EINVAL;
		goto end;
	}

	if (g_cre_dev.open_cnt == 0)
		cam_node_shutdown(node);

end:
	mutex_unlock(&g_cre_dev.cre_lock);
	return rc;
}

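/*
 * v4l2 close hook. While the CAM request manager (CRM) is still active,
 * teardown is expected to come through CRM, so the close is a no-op here.
 */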
static int cam_cre_subdev_close(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	bool crm_active = cam_req_mgr_is_open();

	if (crm_active) {
		CAM_DBG(CAM_CRE, "CRM is ACTIVE, close should be from CRM");
		return 0;
	}

	return cam_cre_subdev_close_internal(sd, fh);
}

static const struct v4l2_subdev_internal_ops cam_cre_subdev_internal_ops = {
	.close = cam_cre_subdev_close,
	.open = cam_cre_subdev_open,
};

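/*
 * Component bind: registers the v4l2 subdev, brings up the CRE HW manager,
 * initializes the context array and the cam_node on top of it, and hooks
 * up the IOMMU page-fault handler. Errors unwind in reverse order through
 * the goto labels at the bottom of the function.
 */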
static int cam_cre_subdev_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;
	int rc = 0;
	struct cam_hw_mgr_intf *hw_mgr_intf;
	struct cam_node *node;
	int iommu_hdl = -1;
	struct platform_device *pdev = to_platform_device(dev);

	g_cre_dev.sd.pdev = pdev;
	g_cre_dev.sd.internal_ops = &cam_cre_subdev_internal_ops;
	rc = cam_subdev_probe(&g_cre_dev.sd, pdev, CAM_CRE_DEV_NAME,
		CAM_CRE_DEVICE_TYPE);
	if (rc) {
		CAM_ERR(CAM_CRE, "CRE cam_subdev_probe failed %d", rc);
		goto err;
	}

	node = (struct cam_node *)g_cre_dev.sd.token;

	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
	if (!hw_mgr_intf) {
		CAM_ERR(CAM_CRE, "Error allocating memory");
		rc = -ENOMEM;
		goto hw_alloc_fail;
	}

	rc = cam_cre_hw_mgr_init(pdev->dev.of_node, hw_mgr_intf,
		&iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_CRE, "Can not initialize CRE HWmanager %d", rc);
		goto hw_init_fail;
	}

	/* Bound the loops by CRE_CTX_MAX to match the context array sizes above */
	memset(g_cre_dev.ctx_cre, 0, sizeof(g_cre_dev.ctx_cre));
	for (i = 0; i < CRE_CTX_MAX; i++) {
		g_cre_dev.ctx_cre[i].base = &g_cre_dev.ctx[i];
		rc = cam_cre_context_init(&g_cre_dev.ctx_cre[i],
			hw_mgr_intf, i, iommu_hdl);
		if (rc) {
			CAM_ERR(CAM_CRE, "CRE context init failed %d %d",
				i, rc);
			goto ctx_init_fail;
		}
	}

	rc = cam_node_init(node, hw_mgr_intf, g_cre_dev.ctx,
		CRE_CTX_MAX, CAM_CRE_DEV_NAME);
	if (rc) {
		CAM_ERR(CAM_CRE, "CRE node init failed %d", rc);
		goto ctx_init_fail;
	}

	node->sd_handler = cam_cre_subdev_close_internal;
	cam_smmu_set_client_page_fault_handler(iommu_hdl,
		cam_cre_dev_iommu_fault_handler, node);
	g_cre_dev.open_cnt = 0;
	mutex_init(&g_cre_dev.cre_lock);

	CAM_DBG(CAM_CRE, "Component bound successfully");
	return rc;

ctx_init_fail:
	for (--i; i >= 0; i--)
		if (cam_cre_context_deinit(&g_cre_dev.ctx_cre[i]))
			CAM_ERR(CAM_CRE, "deinit fail %d %d", i, rc);
hw_init_fail:
	kfree(hw_mgr_intf);
hw_alloc_fail:
	if (cam_subdev_remove(&g_cre_dev.sd))
		CAM_ERR(CAM_CRE, "remove fail %d", rc);
err:
	return rc;
}

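/*
 * Component unbind: tears down the contexts, the node and the subdev,
 * then destroys the device lock.
 */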
static void cam_cre_subdev_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	for (i = 0; i < CRE_CTX_MAX; i++)
		cam_cre_context_deinit(&g_cre_dev.ctx_cre[i]);

	cam_node_deinit(g_cre_dev.node);
	cam_subdev_remove(&g_cre_dev.sd);
	mutex_destroy(&g_cre_dev.cre_lock);
}

static const struct component_ops cam_cre_subdev_component_ops = {
	.bind = cam_cre_subdev_component_bind,
	.unbind = cam_cre_subdev_component_unbind,
};

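/* Platform driver remove: detach this component from the aggregate device. */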
static int cam_cre_subdev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_cre_subdev_component_ops);
	return 0;
}

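/*
 * Platform driver probe: register this driver as a component; the real
 * setup happens in the bind callback once all components are available.
 */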
static int cam_cre_subdev_probe(struct platform_device *pdev)
{
	int rc = 0;

	CAM_DBG(CAM_CRE, "Adding CRE sub component");
	rc = component_add(&pdev->dev, &cam_cre_subdev_component_ops);
	if (rc)
		CAM_ERR(CAM_CRE, "failed to add component rc: %d", rc);

	return rc;
}

static const struct of_device_id cam_cre_subdev_dt_match[] = {
	{
		.compatible = "qcom,cam-cre",
	},
	{}
};
MODULE_DEVICE_TABLE(of, cam_cre_subdev_dt_match);

struct platform_driver cam_cre_subdev_driver = {
	.probe = cam_cre_subdev_probe,
	.remove = cam_cre_subdev_remove,
	.driver = {
		.name = "cam_cre",
		.of_match_table = cam_cre_subdev_dt_match,
		.suppress_bind_attrs = true,
	},
};

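/*
 * Driver registration helpers. This file does not use module_init/
 * module_exit directly, so these entry points are expected to be called
 * by the top-level camera module (see camera_main.h).
 */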
int cam_cre_subdev_init_module(void)
{
	return platform_driver_register(&cam_cre_subdev_driver);
}

void cam_cre_subdev_exit_module(void)
{
	platform_driver_unregister(&cam_cre_subdev_driver);
}

MODULE_DESCRIPTION("MSM CRE driver");
MODULE_LICENSE("GPL v2");