cam_icp_context.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/debugfs.h>
  7. #include <linux/videodev2.h>
  8. #include <linux/slab.h>
  9. #include <linux/uaccess.h>
  10. #include <media/cam_sync.h>
  11. #include <media/cam_defs.h>
  12. #include <media/cam_icp.h>
  13. #include "cam_node.h"
  14. #include "cam_context.h"
  15. #include "cam_context_utils.h"
  16. #include "cam_icp_context.h"
  17. #include "cam_req_mgr_util.h"
  18. #include "cam_mem_mgr.h"
  19. #include "cam_trace.h"
  20. #include "cam_debug_util.h"
  21. #include "cam_packet_util.h"
  22. #include "cam_req_mgr_dev.h"
  23. #include "cam_icp_hw_mgr_intf.h"
  24. static int cam_icp_context_dump_active_request(void *data, void *args)
  25. {
  26. struct cam_context *ctx = (struct cam_context *)data;
  27. struct cam_ctx_request *req = NULL;
  28. struct cam_ctx_request *req_temp = NULL;
  29. struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args;
  30. int rc = 0;
  31. if (!ctx || !pf_args) {
  32. CAM_ERR(CAM_ICP, "Invalid ctx %pK or pf args %pK",
  33. ctx, pf_args);
  34. return -EINVAL;
  35. }
  36. CAM_INFO(CAM_ICP, "[%s] iommu fault for icp ctx %d state %d",
  37. ctx->dev_name, ctx->ctx_id, ctx->state);
  38. list_for_each_entry_safe(req, req_temp,
  39. &ctx->active_req_list, list) {
  40. CAM_INFO(CAM_ICP, "[%s] ctx[%u]: Active req_id: %llu",
  41. ctx->dev_name, ctx->ctx_id, req->request_id);
  42. rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data);
  43. if (rc)
  44. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to dump pf info.ctx->state: %d",
  45. ctx->dev_name, ctx->ctx_id, ctx->state);
  46. }
  47. /*
  48. * Faulted ctx found. Since IPE/BPS instances are shared among contexts,
  49. * faulted ctx is found if and only if the context contains
  50. * faulted buffer
  51. */
  52. if (pf_args->pf_context_info.ctx_found) {
  53. /* Send PF notification to UMD if PF found on current CTX */
  54. rc = cam_context_send_pf_evt(ctx, pf_args);
  55. if (rc)
  56. CAM_ERR(CAM_ICP,
  57. "[%s] ctx[%u]: Failed to notify PF event to userspace rc: %d",
  58. ctx->dev_name, ctx->ctx_id, rc);
  59. }
  60. return rc;
  61. }
  62. static int cam_icp_context_mini_dump(void *priv, void *args)
  63. {
  64. int rc;
  65. struct cam_context *ctx;
  66. if (!priv || !args) {
  67. CAM_ERR(CAM_ICP, "Invalid priv %pK args %pK", priv, args);
  68. return -EINVAL;
  69. }
  70. ctx = (struct cam_context *)priv;
  71. rc = cam_context_mini_dump(ctx, args);
  72. if (rc)
  73. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Mini Dump failed rc %d",
  74. ctx->dev_name, ctx->ctx_id, rc);
  75. return rc;
  76. }
  77. static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
  78. struct cam_acquire_dev_cmd *cmd)
  79. {
  80. int rc;
  81. rc = cam_context_acquire_dev_to_hw(ctx, cmd);
  82. if (!rc) {
  83. ctx->state = CAM_CTX_ACQUIRED;
  84. trace_cam_context_state(ctx->dev_name, ctx);
  85. }
  86. return rc;
  87. }
  88. static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
  89. struct cam_release_dev_cmd *cmd)
  90. {
  91. int rc;
  92. cam_common_release_evt_params(ctx->dev_hdl);
  93. rc = cam_context_release_dev_to_hw(ctx, cmd);
  94. if (rc)
  95. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Unable to release device",
  96. ctx->dev_name, ctx->ctx_id);
  97. ctx->state = CAM_CTX_AVAILABLE;
  98. trace_cam_context_state(ctx->dev_name, ctx);
  99. return rc;
  100. }
  101. static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
  102. struct cam_start_stop_dev_cmd *cmd)
  103. {
  104. int rc;
  105. rc = cam_context_start_dev_to_hw(ctx, cmd);
  106. if (!rc) {
  107. ctx->state = CAM_CTX_READY;
  108. trace_cam_context_state(ctx->dev_name, ctx);
  109. }
  110. return rc;
  111. }
  112. static int __cam_icp_dump_dev_in_ready(
  113. struct cam_context *ctx,
  114. struct cam_dump_req_cmd *cmd)
  115. {
  116. int rc;
  117. rc = cam_context_dump_dev_to_hw(ctx, cmd);
  118. if (rc)
  119. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to dump device",
  120. ctx->dev_name, ctx->ctx_id);
  121. return rc;
  122. }
  123. static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
  124. struct cam_flush_dev_cmd *cmd)
  125. {
  126. int rc;
  127. struct cam_context_utils_flush_args flush_args;
  128. flush_args.cmd = cmd;
  129. flush_args.flush_active_req = false;
  130. rc = cam_context_flush_dev_to_hw(ctx, &flush_args);
  131. if (rc)
  132. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to flush device",
  133. ctx->dev_name, ctx->ctx_id);
  134. return rc;
  135. }
  136. static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
  137. struct cam_config_dev_cmd *cmd)
  138. {
  139. int rc;
  140. size_t len;
  141. uintptr_t packet_addr;
  142. struct cam_packet *packet;
  143. size_t remain_len = 0;
  144. rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
  145. &packet_addr, &len);
  146. if (rc) {
  147. CAM_ERR(CAM_ICP, "[%s][%d] Can not get packet address",
  148. ctx->dev_name, ctx->ctx_id);
  149. rc = -EINVAL;
  150. return rc;
  151. }
  152. remain_len = len;
  153. if ((len < sizeof(struct cam_packet)) ||
  154. (cmd->offset >= (len - sizeof(struct cam_packet)))) {
  155. CAM_ERR(CAM_CTXT,
  156. "[%s] ctx[%u]: Invalid offset, len: %zu cmd offset: %llu sizeof packet: %zu",
  157. ctx->dev_name, ctx->ctx_id,
  158. len, cmd->offset, sizeof(struct cam_packet));
  159. goto put_cpu_buf;
  160. }
  161. remain_len -= (size_t)cmd->offset;
  162. packet = (struct cam_packet *) ((uint8_t *)packet_addr +
  163. (uint32_t)cmd->offset);
  164. rc = cam_packet_util_validate_packet(packet, remain_len);
  165. if (rc) {
  166. CAM_ERR(CAM_CTXT, "[%s] ctx[%u]: Invalid packet params, remain length: %zu",
  167. ctx->dev_name, ctx->ctx_id,
  168. remain_len);
  169. goto put_cpu_buf;
  170. }
  171. if (((packet->header.op_code & 0xff) ==
  172. CAM_ICP_OPCODE_IPE_SETTINGS) ||
  173. ((packet->header.op_code & 0xff) ==
  174. CAM_ICP_OPCODE_BPS_SETTINGS) ||
  175. ((packet->header.op_code & 0xff) ==
  176. CAM_ICP_OPCODE_OFE_SETTINGS))
  177. rc = cam_context_config_dev_to_hw(ctx, cmd);
  178. else
  179. rc = cam_context_prepare_dev_to_hw(ctx, cmd);
  180. if (rc)
  181. CAM_ERR(CAM_ICP, "[%s] ctx[%u]:Failed to prepare device",
  182. ctx->dev_name, ctx->ctx_id);
  183. put_cpu_buf:
  184. cam_mem_put_cpu_buf((int32_t) cmd->packet_handle);
  185. return rc;
  186. }
  187. static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
  188. struct cam_start_stop_dev_cmd *cmd)
  189. {
  190. int rc;
  191. rc = cam_context_stop_dev_to_hw(ctx);
  192. if (rc)
  193. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to stop device",
  194. ctx->dev_name, ctx->ctx_id);
  195. ctx->state = CAM_CTX_ACQUIRED;
  196. trace_cam_context_state(ctx->dev_name, ctx);
  197. return rc;
  198. }
  199. static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
  200. struct cam_release_dev_cmd *cmd)
  201. {
  202. int rc;
  203. rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
  204. if (rc)
  205. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to stop device",
  206. ctx->dev_name, ctx->ctx_id);
  207. rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
  208. if (rc)
  209. CAM_ERR(CAM_ICP, "Failed to release device");
  210. return rc;
  211. }
  212. static uint32_t cam_icp_context_get_error_code(uint32_t err_type)
  213. {
  214. switch (err_type) {
  215. case CAM_ICP_HW_ERROR_NO_MEM:
  216. return CAM_REQ_MGR_ICP_NO_MEMORY;
  217. case CAM_ICP_HW_ERROR_SYSTEM_FAILURE:
  218. return CAM_REQ_MGR_ICP_SYSTEM_FAILURE;
  219. default:
  220. return 0;
  221. }
  222. }
  223. static int __cam_icp_notify_v4l2_err_evt(struct cam_context *ctx,
  224. uint32_t err_type, uint32_t err_code, uint64_t request_id)
  225. {
  226. struct cam_req_mgr_message req_msg = {0};
  227. int rc;
  228. req_msg.session_hdl = ctx->session_hdl;
  229. req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
  230. req_msg.u.err_msg.error_type = err_type;
  231. req_msg.u.err_msg.link_hdl = ctx->link_hdl;
  232. req_msg.u.err_msg.request_id = request_id;
  233. req_msg.u.err_msg.resource_size = 0x0;
  234. req_msg.u.err_msg.error_code = err_code;
  235. rc = cam_req_mgr_notify_message(&req_msg, V4L_EVENT_CAM_REQ_MGR_ERROR,
  236. V4L_EVENT_CAM_REQ_MGR_EVENT);
  237. if (rc)
  238. CAM_ERR(CAM_ICP,
  239. "[%s] ctx[%u]: Error in notifying the error time for req id:%lld",
  240. ctx->dev_name, ctx->ctx_id, request_id);
  241. CAM_INFO(CAM_ICP,
  242. "[%s] ctx[%u]: notifying error to userspace err type: %d, err code: %u, req id: %llu",
  243. ctx->dev_name, ctx->ctx_id, err_type, err_code, request_id);
  244. return rc;
  245. }
  246. static int cam_icp_ctx_handle_fatal_error(void *ctx, void *err_evt_data)
  247. {
  248. struct cam_icp_hw_error_evt_data *err_evt;
  249. uint32_t err_code = 0;
  250. int rc;
  251. err_evt = (struct cam_icp_hw_error_evt_data *)err_evt_data;
  252. err_code = cam_icp_context_get_error_code(err_evt->err_type);
  253. rc = __cam_icp_notify_v4l2_err_evt(ctx, CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
  254. err_code, err_evt->req_id);
  255. return rc;
  256. }
  257. static int cam_icp_ctx_handle_buf_done_in_ready(void *ctx, void *done_evt_data)
  258. {
  259. struct cam_icp_hw_buf_done_evt_data *buf_done;
  260. buf_done = (struct cam_icp_hw_buf_done_evt_data *)done_evt_data;
  261. return cam_context_buf_done_from_hw(ctx, buf_done->buf_done_data, buf_done->evt_id);
  262. }
  263. static int cam_icp_ctx_handle_error_inducement(void *ctx, void *inject_evt_arg)
  264. {
  265. return cam_context_apply_evt_injection(ctx, inject_evt_arg);
  266. }
  267. static int __cam_icp_ctx_handle_hw_event(void *ctx,
  268. uint32_t evt_id, void *evt_data)
  269. {
  270. int rc;
  271. if (!ctx || !evt_data) {
  272. CAM_ERR(CAM_ICP, "Invalid ctx %s and event data %s",
  273. CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_data));
  274. return -EINVAL;
  275. }
  276. switch (evt_id) {
  277. case CAM_ICP_EVT_ID_BUF_DONE:
  278. rc = cam_icp_ctx_handle_buf_done_in_ready(ctx, evt_data);
  279. break;
  280. case CAM_ICP_EVT_ID_ERROR:
  281. rc = cam_icp_ctx_handle_fatal_error(ctx, evt_data);
  282. break;
  283. case CAM_ICP_EVT_ID_INJECT_EVENT:
  284. rc = cam_icp_ctx_handle_error_inducement(ctx, evt_data);
  285. break;
  286. default:
  287. CAM_ERR(CAM_ICP, "Invalid event id: %u", evt_id);
  288. rc = -EINVAL;
  289. }
  290. return rc;
  291. }
  292. static int cam_icp_context_validate_event_notify_injection(struct cam_context *ctx,
  293. struct cam_hw_inject_evt_param *evt_params)
  294. {
  295. int rc = 0;
  296. uint32_t evt_type;
  297. uint64_t req_id;
  298. req_id = evt_params->req_id;
  299. evt_type = evt_params->u.evt_notify.evt_notify_type;
  300. switch (evt_type) {
  301. case V4L_EVENT_CAM_REQ_MGR_ERROR: {
  302. struct cam_hw_inject_err_evt_param *err_evt_params =
  303. &evt_params->u.evt_notify.u.err_evt_params;
  304. switch (err_evt_params->err_type) {
  305. case CAM_REQ_MGR_ERROR_TYPE_RECOVERY:
  306. case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY:
  307. break;
  308. default:
  309. CAM_ERR(CAM_ICP,
  310. "[%s] ctx[%u]: Invalid error type: %u for error event injection err code: %u req id: %llu dev hdl: %d",
  311. ctx->dev_name, ctx->ctx_id, err_evt_params->err_type,
  312. err_evt_params->err_code, ctx->dev_hdl);
  313. return -EINVAL;
  314. }
  315. CAM_INFO(CAM_ICP,
  316. "[%s] ctx[%u]: Inject ERR evt: err code: %u err type: %u req id: %llu dev hdl: %d",
  317. ctx->dev_name, ctx->ctx_id, err_evt_params->err_code,
  318. err_evt_params->err_type, req_id, ctx->dev_hdl);
  319. break;
  320. }
  321. case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: {
  322. struct cam_hw_inject_pf_evt_param *pf_evt_params =
  323. &evt_params->u.evt_notify.u.pf_evt_params;
  324. bool non_fatal_en;
  325. rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en);
  326. if (rc) {
  327. CAM_ERR(CAM_ICP,
  328. "[%s] ctx[%u]: Fail to query whether device's cb has non-fatal enabled rc: %d",
  329. ctx->dev_name, ctx->ctx_id, rc);
  330. return rc;
  331. }
  332. if (!non_fatal_en) {
  333. CAM_ERR(CAM_ICP,
  334. "[%s] ctx[%u]: Fail to inject page fault event notification. Page fault is fatal for ICP",
  335. ctx->dev_name, ctx->ctx_id);
  336. return -EINVAL;
  337. }
  338. CAM_INFO(CAM_ICP,
  339. "[%s] ctx[%u]: Inject PF evt: req_id: %llu dev hdl: %d ctx found: %hhu",
  340. ctx->dev_name, ctx->ctx_id,
  341. req_id, ctx->dev_hdl, pf_evt_params->ctx_found);
  342. break;
  343. }
  344. default:
  345. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Event notification type not supported: %u",
  346. ctx->dev_name, ctx->ctx_id, evt_type);
  347. rc = -EINVAL;
  348. }
  349. return rc;
  350. }
  351. static int cam_icp_context_inject_evt(void *context, void *evt_args)
  352. {
  353. struct cam_context *ctx = context;
  354. struct cam_hw_inject_evt_param *evt_params = NULL;
  355. struct cam_hw_inject_buffer_error_param *buf_err_params = NULL;
  356. int rc = 0;
  357. if (!ctx || !evt_args) {
  358. CAM_ERR(CAM_ICP,
  359. "invalid params ctx %s event args %s",
  360. CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args));
  361. return -EINVAL;
  362. }
  363. evt_params = (struct cam_hw_inject_evt_param *)evt_args;
  364. if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE) {
  365. buf_err_params = &evt_params->u.buf_err_evt;
  366. if (buf_err_params->sync_error > CAM_SYNC_ICP_EVENT_START ||
  367. buf_err_params->sync_error < CAM_SYNC_ICP_EVENT_END) {
  368. CAM_INFO(CAM_ICP, "[%s] ctx[%u]: Inject buffer sync error %u req id %llu",
  369. ctx->dev_name, ctx->ctx_id, buf_err_params->sync_error,
  370. evt_params->req_id);
  371. } else {
  372. CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Invalid buffer sync error %u req id %llu",
  373. ctx->dev_name, ctx->ctx_id, buf_err_params->sync_error,
  374. evt_params->req_id);
  375. return -EINVAL;
  376. }
  377. } else {
  378. rc = cam_icp_context_validate_event_notify_injection(ctx, evt_params);
  379. if (rc) {
  380. CAM_ERR(CAM_ICP,
  381. "[%s] ctx[%u]: Event notification injection failed validation rc: %d",
  382. ctx->dev_name, ctx->ctx_id, rc);
  383. return -EINVAL;
  384. }
  385. }
  386. if (ctx->hw_mgr_intf->hw_inject_evt)
  387. ctx->hw_mgr_intf->hw_inject_evt(ctx->ctxt_to_hw_map, evt_args);
  388. return rc;
  389. }
/*
 * Per-state operation table for the ICP context, indexed by
 * enum cam_context state (CAM_CTX_STATE_MAX entries). States with
 * empty ioctl_ops reject the corresponding userspace calls.
 */
static struct cam_ctx_ops
	cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available: only acquire is legal before the device is owned */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_icp_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
		.mini_dump_ops = cam_icp_context_mini_dump,
	},
	/* Acquired: device owned but not started */
	{
		.ioctl_ops = {
			.release_dev = __cam_icp_release_dev_in_acquired,
			.start_dev = __cam_icp_start_dev_in_acquired,
			.config_dev = __cam_icp_config_dev_in_ready,
			.flush_dev = __cam_icp_flush_dev_in_ready,
			.dump_dev = __cam_icp_dump_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_icp_ctx_handle_hw_event,
		.pagefault_ops = cam_icp_context_dump_active_request,
		.mini_dump_ops = cam_icp_context_mini_dump,
		.evt_inject_ops = cam_icp_context_inject_evt,
	},
	/* Ready: device started; release implies a stop first */
	{
		.ioctl_ops = {
			.stop_dev = __cam_icp_stop_dev_in_ready,
			.release_dev = __cam_icp_release_dev_in_ready,
			.config_dev = __cam_icp_config_dev_in_ready,
			.flush_dev = __cam_icp_flush_dev_in_ready,
			.dump_dev = __cam_icp_dump_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_icp_ctx_handle_hw_event,
		.pagefault_ops = cam_icp_context_dump_active_request,
		.mini_dump_ops = cam_icp_context_mini_dump,
		.evt_inject_ops = cam_icp_context_inject_evt,
	},
	/* Flushed: no operations supported */
	{
		.ioctl_ops = {},
	},
	/* Activated: no ioctls, but PF dump / mini dump / injection remain */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
		.pagefault_ops = cam_icp_context_dump_active_request,
		.mini_dump_ops = cam_icp_context_mini_dump,
		.evt_inject_ops = cam_icp_context_inject_evt,
	},
};
  451. int cam_icp_context_init(struct cam_icp_context *ctx, struct cam_hw_mgr_intf *hw_intf,
  452. uint32_t ctx_id, int img_iommu_hdl, const char *icp_dev_name)
  453. {
  454. int rc;
  455. if ((!ctx) || (!ctx->base) || (!hw_intf) || (!icp_dev_name)) {
  456. CAM_ERR(CAM_ICP,
  457. "Invalid params: ctx: %s hw intf: %s dev name: %s",
  458. CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(hw_intf),
  459. CAM_IS_NULL_TO_STR(icp_dev_name));
  460. rc = -EINVAL;
  461. goto err;
  462. }
  463. rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
  464. NULL, hw_intf, ctx->req_base, CAM_CTX_ICP_REQ_MAX, img_iommu_hdl);
  465. if (rc) {
  466. CAM_ERR(CAM_ICP, "[%s] Camera Context Base init failed", icp_dev_name);
  467. goto err;
  468. }
  469. ctx->base->state_machine = cam_icp_ctx_state_machine;
  470. ctx->base->ctx_priv = ctx;
  471. ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX;
  472. ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX;
  473. ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX;
  474. ctx->ctxt_to_hw_map = NULL;
  475. err:
  476. return rc;
  477. }
  478. int cam_icp_context_deinit(struct cam_icp_context *ctx)
  479. {
  480. if ((!ctx) || (!ctx->base)) {
  481. CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
  482. return -EINVAL;
  483. }
  484. cam_context_deinit(ctx->base);
  485. memset(ctx, 0, sizeof(*ctx));
  486. return 0;
  487. }