cam_lrme_context.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/kernel.h>
  8. #include "cam_debug_util.h"
  9. #include "cam_lrme_context.h"
  10. static const char lrme_dev_name[] = "cam-lrme";
  11. static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
  12. struct cam_acquire_dev_cmd *cmd)
  13. {
  14. int rc = 0;
  15. uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map;
  16. struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
  17. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  18. rc = cam_context_acquire_dev_to_hw(ctx, cmd);
  19. if (rc) {
  20. CAM_ERR(CAM_LRME, "Failed to acquire");
  21. return rc;
  22. }
  23. ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
  24. ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
  25. ctx->state = CAM_CTX_ACQUIRED;
  26. return rc;
  27. }
  28. static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
  29. struct cam_release_dev_cmd *cmd)
  30. {
  31. int rc = 0;
  32. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  33. rc = cam_context_release_dev_to_hw(ctx, cmd);
  34. if (rc) {
  35. CAM_ERR(CAM_LRME, "Failed to release");
  36. return rc;
  37. }
  38. ctx->state = CAM_CTX_AVAILABLE;
  39. return rc;
  40. }
  41. static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
  42. struct cam_start_stop_dev_cmd *cmd)
  43. {
  44. int rc = 0;
  45. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  46. rc = cam_context_start_dev_to_hw(ctx, cmd);
  47. if (rc) {
  48. CAM_ERR(CAM_LRME, "Failed to start");
  49. return rc;
  50. }
  51. ctx->state = CAM_CTX_ACTIVATED;
  52. return rc;
  53. }
  54. static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
  55. struct cam_config_dev_cmd *cmd)
  56. {
  57. int rc;
  58. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  59. rc = cam_context_prepare_dev_to_hw(ctx, cmd);
  60. if (rc) {
  61. CAM_ERR(CAM_LRME, "Failed to config");
  62. return rc;
  63. }
  64. return rc;
  65. }
  66. static int __cam_lrme_ctx_dump_dev_in_activated(
  67. struct cam_context *ctx,
  68. struct cam_dump_req_cmd *cmd)
  69. {
  70. int rc = 0;
  71. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  72. rc = cam_context_dump_dev_to_hw(ctx, cmd);
  73. if (rc)
  74. CAM_ERR(CAM_LRME, "Failed to dump device");
  75. return rc;
  76. }
  77. static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
  78. struct cam_flush_dev_cmd *cmd)
  79. {
  80. int rc;
  81. struct cam_context_utils_flush_args flush_args;
  82. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  83. flush_args.cmd = cmd;
  84. flush_args.flush_active_req = true;
  85. rc = cam_context_flush_dev_to_hw(ctx, &flush_args);
  86. if (rc)
  87. CAM_ERR(CAM_LRME, "Failed to flush device");
  88. return rc;
  89. }
  90. static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
  91. struct cam_start_stop_dev_cmd *cmd)
  92. {
  93. int rc = 0;
  94. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  95. rc = cam_context_stop_dev_to_hw(ctx);
  96. if (rc) {
  97. CAM_ERR(CAM_LRME, "Failed to stop dev");
  98. return rc;
  99. }
  100. ctx->state = CAM_CTX_ACQUIRED;
  101. return rc;
  102. }
  103. static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
  104. struct cam_release_dev_cmd *cmd)
  105. {
  106. int rc = 0;
  107. CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
  108. rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
  109. if (rc) {
  110. CAM_ERR(CAM_LRME, "Failed to stop");
  111. return rc;
  112. }
  113. rc = cam_context_release_dev_to_hw(ctx, cmd);
  114. if (rc) {
  115. CAM_ERR(CAM_LRME, "Failed to release");
  116. return rc;
  117. }
  118. ctx->state = CAM_CTX_AVAILABLE;
  119. return rc;
  120. }
  121. static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
  122. uint32_t evt_id, void *evt_data)
  123. {
  124. int rc;
  125. CAM_DBG(CAM_LRME, "Enter");
  126. rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
  127. if (rc) {
  128. CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
  129. return rc;
  130. }
  131. return rc;
  132. }
/*
 * Top state machine: per-state ioctl/crm/irq dispatch table, indexed by
 * cam_context state. States with empty ops reject the corresponding
 * ioctls at the generic context layer. Ordering of entries must match
 * the CAM_CTX_* state enum.
 */
static struct cam_ctx_ops
	cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit: nothing is valid before init */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available: only acquire is legal */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired: config/release/start allowed */
	{
		.ioctl_ops = {
			/*
			 * NOTE(review): config handler is shared with the
			 * Activated state (same prepare path) — intentional
			 * reuse, not a copy-paste slip.
			 */
			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
			.release_dev = __cam_lrme_ctx_release_dev_in_acquired,
			.start_dev = __cam_lrme_ctx_start_dev_in_acquired,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Ready: unused by LRME */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Flushed: unused by LRME */
	{
		.ioctl_ops = {},
	},
	/* Activate: full streaming op set plus hw event handling */
	{
		.ioctl_ops = {
			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
			.release_dev = __cam_lrme_ctx_release_dev_in_activated,
			.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
			.flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
			.dump_dev = __cam_lrme_ctx_dump_dev_in_activated,
		},
		.crm_ops = {},
		.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
	},
};
  183. int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
  184. struct cam_context *base_ctx,
  185. struct cam_hw_mgr_intf *hw_intf,
  186. uint32_t index
  187. int img_iommu_hdl)
  188. {
  189. int rc = 0;
  190. CAM_DBG(CAM_LRME, "Enter");
  191. if (!base_ctx || !lrme_ctx) {
  192. CAM_ERR(CAM_LRME, "Invalid input");
  193. return -EINVAL;
  194. }
  195. memset(lrme_ctx, 0, sizeof(*lrme_ctx));
  196. rc = cam_context_init(base_ctx, lrme_dev_name, CAM_LRME, index,
  197. NULL, hw_intf, lrme_ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl);
  198. if (rc) {
  199. CAM_ERR(CAM_LRME, "Failed to init context");
  200. return rc;
  201. }
  202. lrme_ctx->base = base_ctx;
  203. lrme_ctx->index = index;
  204. base_ctx->ctx_priv = lrme_ctx;
  205. base_ctx->state_machine = cam_lrme_ctx_state_machine;
  206. base_ctx->max_hw_update_entries = CAM_CTX_CFG_MAX;
  207. base_ctx->max_in_map_entries = CAM_CTX_CFG_MAX;
  208. base_ctx->max_out_map_entries = CAM_CTX_CFG_MAX;
  209. return rc;
  210. }
  211. int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
  212. {
  213. int rc = 0;
  214. CAM_DBG(CAM_LRME, "Enter");
  215. if (!lrme_ctx) {
  216. CAM_ERR(CAM_LRME, "No ctx to deinit");
  217. return -EINVAL;
  218. }
  219. rc = cam_context_deinit(lrme_ctx->base);
  220. memset(lrme_ctx, 0, sizeof(*lrme_ctx));
  221. return rc;
  222. }