cam_ope_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <media/cam_sync.h>
#include <media/cam_defs.h>
#include <media/cam_ope.h>
#include "cam_sync_api.h"
#include "cam_node.h"
#include "cam_context.h"
#include "cam_context_utils.h"
#include "cam_ope_context.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"

static const char ope_dev_name[] = "cam-ope";

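/*
 * Page-fault callback: dump page-fault info for every request on the
 * active list and, when the fault is resolved to this context, notify
 * userspace (UMD) through a PF event.
 */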
static int cam_ope_context_dump_active_request(void *data, void *args)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args;
	int rc = 0;

	if (!ctx || !pf_args) {
		CAM_ERR(CAM_OPE, "Invalid ctx %pK or pf args %pK",
			ctx, pf_args);
		return -EINVAL;
	}

	CAM_INFO(CAM_OPE, "iommu fault for ope ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		CAM_INFO(CAM_OPE, "Active req_id: %llu ctx_id: %u",
			req->request_id, ctx->ctx_id);
		rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data);
		if (rc)
			CAM_ERR(CAM_OPE, "Failed to dump pf info ctx_id: %u state: %d",
				ctx->ctx_id, ctx->state);
	}

	if (pf_args->pf_context_info.ctx_found) {
		/* Send PF notification to UMD if PF found on current CTX */
		rc = cam_context_send_pf_evt(ctx, pf_args);
		if (rc)
			CAM_ERR(CAM_OPE,
				"Failed to notify PF event to userspace rc: %d", rc);
	}

	return rc;
}

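/*
 * The ioctl handlers below are thin, state-specific wrappers around the
 * common cam_context_utils helpers; the ones that involve a state change
 * also update ctx->state and emit a context-state trace event.
 */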
static int __cam_ope_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc;

	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
	if (!rc) {
		ctx->state = CAM_CTX_ACQUIRED;
		trace_cam_context_state("OPE", ctx);
	}

	return rc;
}

static int __cam_ope_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;

	rc = cam_context_release_dev_to_hw(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_OPE, "Unable to release device");

	ctx->state = CAM_CTX_AVAILABLE;
	trace_cam_context_state("OPE", ctx);

	return rc;
}

static int __cam_ope_start_dev_in_acquired(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc;

	rc = cam_context_start_dev_to_hw(ctx, cmd);
	if (!rc) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("OPE", ctx);
	}

	return rc;
}

static int __cam_ope_flush_dev_in_ready(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	struct cam_context_utils_flush_args flush_args;

	flush_args.cmd = cmd;
	flush_args.flush_active_req = false;

	rc = cam_context_flush_dev_to_hw(ctx, &flush_args);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to flush device");

	return rc;
}

static int __cam_ope_dump_dev_in_ready(struct cam_context *ctx,
	struct cam_dump_req_cmd *cmd)
{
	int rc;

	rc = cam_context_dump_dev_to_hw(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to dump device");

	return rc;
}

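/*
 * Config (packet submit) handler, used in both the Acquired and Ready
 * states: hold a CPU-buffer reference on the packet handle so the packet
 * stays mapped while it is handed to the HW manager, then drop the
 * reference once cam_context_prepare_dev_to_hw() returns.
 */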
static int __cam_ope_config_dev_in_ready(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc;
	size_t len;
	uintptr_t packet_addr;

	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		&packet_addr, &len);
	if (rc) {
		CAM_ERR(CAM_OPE, "[%s][%d] Can not get packet address",
			ctx->dev_name, ctx->ctx_id);
		rc = -EINVAL;
		return rc;
	}

	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to prepare device");

	cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);

	return rc;
}

static int __cam_ope_stop_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc;

	rc = cam_context_stop_dev_to_hw(ctx);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to stop device");

	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("OPE", ctx);

	return rc;
}

static int __cam_ope_release_dev_in_ready(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;

	rc = __cam_ope_stop_dev_in_ready(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to stop device");

	rc = __cam_ope_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_OPE, "Failed to release device");

	return rc;
}

static int __cam_ope_handle_buf_done_in_ready(void *ctx,
	uint32_t evt_id, void *done)
{
	return cam_context_buf_done_from_hw(ctx, done, evt_id);
}

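/*
 * Per-state op tables, indexed by the base context state (CAM_CTX_*):
 * only the ops populated for a given state are reachable while the
 * context is in that state.
 */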
static struct cam_ctx_ops
	cam_ope_ctx_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_ope_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.release_dev = __cam_ope_release_dev_in_acquired,
			.start_dev = __cam_ope_start_dev_in_acquired,
			.config_dev = __cam_ope_config_dev_in_ready,
			.flush_dev = __cam_ope_flush_dev_in_ready,
			.dump_dev = __cam_ope_dump_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_ope_handle_buf_done_in_ready,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.stop_dev = __cam_ope_stop_dev_in_ready,
			.release_dev = __cam_ope_release_dev_in_ready,
			.config_dev = __cam_ope_config_dev_in_ready,
			.flush_dev = __cam_ope_flush_dev_in_ready,
			.dump_dev = __cam_ope_dump_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_ope_handle_buf_done_in_ready,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
	/* Flushed */
	{
		.ioctl_ops = {},
	},
	/* Activated */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
		.pagefault_ops = cam_ope_context_dump_active_request,
	},
};

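/**
 * cam_ope_context_init() - initialize one OPE camera context
 * @ctx:           OPE context to initialize; ctx->base must point to a
 *                 valid struct cam_context
 * @hw_intf:       HW manager interface handed to the base context
 * @ctx_id:        index of this context
 * @img_iommu_hdl: image IOMMU handle passed through to the base context
 *
 * Initializes the base context, installs the OPE state machine and sets
 * the maximum hw-update/in-map/out-map entries to CAM_CTX_CFG_MAX.
 *
 * Return: 0 on success, negative error code otherwise.
 */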
int cam_ope_context_init(struct cam_ope_context *ctx,
	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id, int img_iommu_hdl)
{
	int rc;

	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
		CAM_ERR(CAM_OPE, "Invalid params: %pK %pK", ctx, hw_intf);
		rc = -EINVAL;
		goto err;
	}

	rc = cam_context_init(ctx->base, ope_dev_name, CAM_OPE, ctx_id,
		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_OPE, "Camera Context Base init failed");
		goto err;
	}

	ctx->base->state_machine = cam_ope_ctx_state_machine;
	ctx->base->ctx_priv = ctx;
	ctx->ctxt_to_hw_map = NULL;
	ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX;
	ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX;
	ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX;

err:
	return rc;
}

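/**
 * cam_ope_context_deinit() - tear down one OPE camera context
 * @ctx: OPE context previously set up with cam_ope_context_init()
 *
 * Deinitializes the base context and clears the OPE context structure.
 *
 * Return: 0 on success, -EINVAL on invalid parameters.
 */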
int cam_ope_context_deinit(struct cam_ope_context *ctx)
{
	if ((!ctx) || (!ctx->base)) {
		CAM_ERR(CAM_OPE, "Invalid params: %pK", ctx);
		return -EINVAL;
	}

	cam_context_deinit(ctx->base);
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}
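
/*
 * Minimal usage sketch (illustrative only; num_ctx, ope_ctx, ctx_base,
 * hw_mgr_intf and iommu_hdl are placeholder names for whatever the OPE
 * subdev/probe code actually provides):
 *
 *	for (i = 0; i < num_ctx; i++) {
 *		ope_ctx[i].base = &ctx_base[i];
 *		rc = cam_ope_context_init(&ope_ctx[i], hw_mgr_intf,
 *			i, iommu_hdl);
 *		if (rc)
 *			break;
 *	}
 *	...
 *	cam_ope_context_deinit(&ope_ctx[i]);
 */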