// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_custom_context.h"
#include "cam_common_util.h"

static const char custom_dev_name[] = "cam-custom";

static int __cam_custom_ctx_handle_irq_in_activated(
	void *context, uint32_t evt_id, void *evt_data);
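
/*
 * Insert @req into ctx->pending_req_list so the list stays sorted by
 * ascending request_id: entries with a larger id are parked on a
 * temporary list and re-appended after @req. A duplicate request_id
 * is only warned about, not rejected.
 */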
static int __cam_custom_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_CUSTOM,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	spin_unlock_bh(&ctx->lock);
	return 0;
}
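
/*
 * Detach requests from @req_list, signal each of their output fences
 * with CAM_SYNC_STATE_SIGNALED_ERROR and recycle the requests onto
 * ctx->free_req_list. CANCEL_REQ flushes only the request matching
 * flush_req->req_id; all other flush types drain the entire list.
 * Callers hold ctx->lock.
 */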
static int __cam_custom_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_custom_dev_ctx_req *req_custom;
	struct list_head flush_list;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_CUSTOM, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_ERR(CAM_CUSTOM, "no request to cancel");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	CAM_DBG(CAM_CUSTOM, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		for (i = 0; i < req_custom->num_fence_map_out; i++) {
			if (req_custom->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_CUSTOM,
					"Flush req 0x%llx, fence %d",
					req->request_id,
					req_custom->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc)
					CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
						"signal fence failed\n");
				req_custom->fence_map_out[i].sync_id = -1;
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
		!cancel_req_id_found)
		CAM_DBG(CAM_CUSTOM,
			"Flush request id:%lld is not found in the list",
			flush_req->req_id);

	return 0;
}
static int __cam_custom_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		CAM_INFO(CAM_CUSTOM, "Last request id to flush is %lld",
			flush_req->req_id);
		ctx->last_flush_req = flush_req->req_id;
	}

	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	return rc;
}

static int __cam_custom_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in the pending req list, change state to acquired */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}

static int __cam_custom_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}
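
/*
 * Common stop path: stop the HW (if bound) and fail out every request
 * still on the pending, wait and active lists by signalling their
 * output fences with CAM_SYNC_STATE_SIGNALED_ERROR and moving the
 * requests back to the free list. A NULL @stop_cmd indicates an
 * internally driven stop (release/unlink), in which case the CRM link
 * is torn down here as well.
 */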
static int __cam_custom_stop_dev_core(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_hw_stop_args stop;

	if (ctx_custom->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_custom->hw_ctx;
		stop.args = NULL;
		if (ctx->hw_mgr_intf->hw_stop)
			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
				&stop);
	}

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM,
			"signal fence in pending list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in wait list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in active list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}
	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;

	CAM_DBG(CAM_CUSTOM, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_custom_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);
	}

	return rc;
}

static int __cam_custom_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *)ctx->ctx_priv;

	__cam_custom_stop_dev_core(ctx, cmd);
	ctx_custom->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}
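
/*
 * Release the HW context back to the HW manager, clear the per-context
 * bookkeeping, flush anything left on the pending list and drop back
 * to the ACQUIRED state.
 */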
static int __cam_custom_ctx_release_hw_in_top_state(
	struct cam_context *ctx, void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_req_mgr_flush_request flush_req;
	struct cam_custom_context *custom_ctx =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (custom_ctx->hw_ctx) {
		rel_arg.ctxt_to_hw_map = custom_ctx->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		custom_ctx->hw_ctx = NULL;
		if (rc)
			CAM_ERR(CAM_CUSTOM,
				"Failed to release HW for ctx:%u", ctx->ctx_id);
	} else {
		CAM_ERR(CAM_CUSTOM, "No HW resources acquired for this ctx");
	}

	ctx->last_flush_req = 0;
	custom_ctx->frame_id = 0;
	custom_ctx->active_req_cnt = 0;
	custom_ctx->hw_acquired = false;
	custom_ctx->init_received = false;

	/* check for active requests as well */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list,
		&flush_req);
	spin_unlock_bh(&ctx->lock);
	ctx->state = CAM_CTX_ACQUIRED;

	CAM_DBG(CAM_CUSTOM, "Release HW success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}

static int __cam_custom_ctx_release_hw_in_activated_state(
	struct cam_context *ctx, void *cmd)
{
	int rc = 0;

	rc = __cam_custom_stop_dev_in_activated(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Release hw failed rc=%d", rc);

	return rc;
}

static int __cam_custom_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_custom->hw_ctx) {
		CAM_ERR(CAM_CUSTOM, "releasing hw");
		__cam_custom_ctx_release_hw_in_top_state(ctx, NULL);
	}

	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;
	ctx_custom->hw_acquired = false;
	ctx_custom->init_received = false;

	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_CUSTOM, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list,
		&flush_req);
	spin_unlock_bh(&ctx->lock);
	ctx->state = CAM_CTX_AVAILABLE;

	CAM_DBG(CAM_CUSTOM, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}
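
/*
 * CRM apply hook: the request being applied must be at the head of the
 * pending list and match apply->request_id exactly. On a successful
 * hw_config() the request moves to the active list, or straight to the
 * free list if it produced no output fences.
 */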
static int __cam_custom_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *custom_ctx = NULL;
	struct cam_hw_config_args cfg;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;
	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the request at
	 * the head of the pending list
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Invalid Request Id asking %llu existing %llu",
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_custom->cfg;
	cfg.num_hw_update_entries = req_custom->num_cfg;
	cfg.priv = &req_custom->hw_update_data;
	cfg.init_packet = 0;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Can not apply the configuration");
	} else {
		spin_lock_bh(&ctx->lock);
		list_del_init(&req->list);
		if (!req->num_out_map_entries) {
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock_bh(&ctx->lock);
		} else {
			list_add_tail(&req->list, &ctx->active_req_list);
			spin_unlock_bh(&ctx->lock);
			/*
			 * for test purposes only; this should be
			 * triggered based on irq
			 */
			__cam_custom_ctx_handle_irq_in_activated(ctx, 0, NULL);
		}
	}

end:
	return rc;
}
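
/*
 * ACQUIRE_HW (v1) handler: copy the cam_custom_acquire_hw_info blob
 * from user space (cmd->resource_hdl carries the user pointer, so
 * handle_type must be 1) and pass it to the HW manager to reserve
 * hardware resources for this context.
 */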
static int __cam_custom_ctx_acquire_hw_v1(
	struct cam_context *ctx, void *args)
{
	int rc = 0;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_custom_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CUSTOM, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_CUSTOM, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_CUSTOM, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Acquire HW failed");
		goto free_res;
	}

	ctx_custom->hw_ctx = param.ctxt_to_hw_map;
	ctx_custom->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	CAM_DBG(CAM_CUSTOM,
		"Acquire HW success on session_hdl 0x%x for ctx_id %u",
		ctx->session_hdl, ctx->ctx_id);

	kfree(acquire_hw_info);
	return rc;

free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
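
/*
 * ACQUIRE_DEV handler: no hardware is reserved here; only a device
 * handle is created via the CRM bridge and the context moves to
 * ACQUIRED. The actual HW reservation happens later via ACQUIRE_HW.
 */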
static int __cam_custom_ctx_acquire_dev_in_available(
	struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_create_dev_hdl req_hdl_param;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CUSTOM, "HW interface is not ready");
		rc = -EFAULT;
		return rc;
	}

	CAM_DBG(CAM_CUSTOM,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	if (cmd->num_resources != CAM_API_COMPAT_CONSTANT) {
		CAM_ERR(CAM_CUSTOM, "Invalid num_resources 0x%x",
			cmd->num_resources);
		return -EINVAL;
	}

	req_hdl_param.session_hdl = cmd->session_handle;
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;

	CAM_DBG(CAM_CUSTOM, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_CUSTOM, "Can not create device handle");
		return rc;
	}
	cmd->dev_handle = ctx->dev_hdl;
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	CAM_DBG(CAM_CUSTOM,
		"Acquire dev success on session_hdl 0x%x for ctx %u",
		cmd->session_handle, ctx->ctx_id);

	return rc;
}
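
/*
 * INIT packets are merged rather than queued: if the head of the
 * pending list is already an INIT request, the new request's config
 * entries and fence maps are folded into it, it inherits the new
 * request_id, and the new request object is recycled. An UPDATE
 * packet arriving before any INIT is rejected.
 */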
static int __cam_custom_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_custom_dev_ctx_req *req_custom_old;
	struct cam_custom_dev_ctx_req *req_custom_new;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_custom_old = (struct cam_custom_dev_ctx_req *) req_old->req_priv;
	req_custom_new = (struct cam_custom_dev_ctx_req *) req->req_priv;
	if (req_custom_old->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if ((req_custom_old->num_cfg + req_custom_new->num_cfg) >=
			CAM_CUSTOM_CTX_CFG_MAX) {
			CAM_WARN(CAM_CUSTOM, "Can not merge INIT pkt");
			rc = -ENOMEM;
		}

		if (req_custom_old->num_fence_map_out != 0 ||
			req_custom_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_CUSTOM, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_custom_old->fence_map_out,
				req_custom_new->fence_map_out,
				sizeof(req_custom_new->fence_map_out[0]) *
				req_custom_new->num_fence_map_out);
			req_custom_old->num_fence_map_out =
				req_custom_new->num_fence_map_out;

			memcpy(req_custom_old->fence_map_in,
				req_custom_new->fence_map_in,
				sizeof(req_custom_new->fence_map_in[0]) *
				req_custom_new->num_fence_map_in);
			req_custom_old->num_fence_map_in =
				req_custom_new->num_fence_map_in;

			memcpy(&req_custom_old->cfg[req_custom_old->num_cfg],
				req_custom_new->cfg,
				sizeof(req_custom_new->cfg[0]) *
				req_custom_new->num_cfg);
			req_custom_old->num_cfg += req_custom_new->num_cfg;

			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_CUSTOM,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}

end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
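
/*
 * CONFIG_DEV handler: map the user packet from its mem mgr handle, let
 * the HW manager translate it into HW update entries and fence maps,
 * and take a ref on every output fence. INIT packets are then merged
 * via __cam_custom_ctx_enqueue_init_request(); UPDATE packets are
 * registered with the CRM and queued in request-id order.
 */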
static int __cam_custom_ctx_config_dev(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_custom_dev_ctx_req *req_custom;
	uintptr_t packet_addr;
	struct cam_packet *packet;
	size_t len = 0;
	struct cam_hw_prepare_update_args cfg;
	struct cam_req_mgr_add_request add_req;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_CUSTOM, "No more request obj free");
		return -ENOMEM;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		&packet_addr, &len);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Can not get packet address");
		rc = -EINVAL;
		goto free_req;
	}

	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
	CAM_DBG(CAM_CUSTOM, "pack_handle %llx", cmd->packet_handle);
	CAM_DBG(CAM_CUSTOM, "packet address is 0x%zx", packet_addr);
	CAM_DBG(CAM_CUSTOM, "packet with length %zu, offset 0x%llx",
		len, cmd->offset);
	CAM_DBG(CAM_CUSTOM, "Packet request id %lld",
		packet->header.request_id);
	CAM_DBG(CAM_CUSTOM, "Packet size 0x%x", packet->header.size);
	CAM_DBG(CAM_CUSTOM, "packet op %d", packet->header.op_code);

	if ((((packet->header.op_code) & 0xF) ==
		CAM_CUSTOM_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_DBG(CAM_CUSTOM,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EINVAL;
		goto free_req;
	}

	/* preprocess the configuration */
	memset(&cfg, 0, sizeof(cfg));
	cfg.packet = packet;
	cfg.ctxt_to_hw_map = ctx_custom->hw_ctx;
	cfg.out_map_entries = req_custom->fence_map_out;
	cfg.in_map_entries = req_custom->fence_map_in;
	cfg.priv = &req_custom->hw_update_data;
	cfg.pf_data = &(req->pf_data);

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_custom->num_cfg = cfg.num_hw_update_entries;
	req_custom->num_fence_map_out = cfg.num_out_map_entries;
	req_custom->num_fence_map_in = cfg.num_in_map_entries;
	req_custom->num_acked = 0;

	for (i = 0; i < req_custom->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_custom->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_CUSTOM, "Can't get ref for fence %d",
				req_custom->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_CUSTOM,
		"num_entry: %d, num fence out: %d, num fence in: %d",
		req_custom->num_cfg, req_custom->num_fence_map_out,
		req_custom->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	CAM_DBG(CAM_CUSTOM, "Packet request id %lld packet opcode:%d",
		packet->header.request_id,
		req_custom->hw_update_data.packet_opcode_type);

	if (req_custom->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_custom_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_CUSTOM, "Enqueue INIT pkt failed");
			ctx_custom->init_received = true;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received INIT pkt in wrong state");
		}
	} else {
		if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			add_req.skip_before_applying = 0;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				CAM_ERR(CAM_CUSTOM,
					"Add req failed: req id=%llu",
					req->request_id);
			} else {
				__cam_custom_ctx_enqueue_request_in_order(
					ctx, req);
			}
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received Update in wrong state");
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_CUSTOM,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_custom->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CUSTOM, "Failed to put ref of fence %d",
				req_custom->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}
static int __cam_custom_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (!ctx_custom->hw_acquired) {
		CAM_ERR(CAM_CUSTOM, "HW not acquired, reject config packet");
		return -EAGAIN;
	}

	rc = __cam_custom_ctx_config_dev(ctx, cmd);
	if (!rc && (ctx->link_hdl >= 0))
		ctx->state = CAM_CTX_READY;

	return rc;
}

static int __cam_custom_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_custom->subscribe_event = link->subscribe_event;

	/* change state only if we had the init config */
	if (ctx_custom->init_received)
		ctx->state = CAM_CTX_READY;

	CAM_DBG(CAM_CUSTOM, "next state %d", ctx->state);

	return 0;
}

static int __cam_custom_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;

	return 0;
}

static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_CUSTOM_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;

	return 0;
}
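
/*
 * START_DEV handler: the head of the pending list (expected to be the
 * INIT request) is submitted through hw_start() as the initial
 * configuration. The state is switched to ACTIVATED before starting
 * the HW and rolled back to READY if the start fails.
 */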
static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_hw_config_args hw_config;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_CUSTOM, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	if (!ctx_custom->hw_ctx) {
		CAM_ERR(CAM_CUSTOM, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	hw_config.ctxt_to_hw_map = ctx_custom->hw_ctx;
	hw_config.request_id = req->request_id;
	hw_config.hw_update_entries = req_custom->cfg;
	hw_config.num_hw_update_entries = req_custom->num_cfg;
	hw_config.priv = &req_custom->hw_update_data;
	hw_config.init_packet = 1;

	ctx->state = CAM_CTX_ACTIVATED;
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_config);
	if (rc) {
		/* HW failure; the user needs to clean up the resource */
		CAM_ERR(CAM_CUSTOM, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM, "start device success ctx %u",
		ctx->ctx_id);

	spin_lock_bh(&ctx->lock);
	list_del_init(&req->list);
	if (req_custom->num_fence_map_out)
		list_add_tail(&req->list, &ctx->active_req_list);
	else
		list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

end:
	return rc;
}

static int __cam_custom_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_custom_stop_dev_core(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_CUSTOM,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_custom_stop_dev_in_activated(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
		/* Handle error/bubble related issues */
		break;
	default:
		CAM_WARN(CAM_CUSTOM, "Unknown event from CRM");
		break;
	}

	return 0;
}
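
/*
 * IRQ entry point while ACTIVATED. Only buf-done is handled for now;
 * completed requests have their fences signalled through the generic
 * cam_context_buf_done_from_hw() helper.
 */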
static int __cam_custom_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc;
	struct cam_context *ctx =
		(struct cam_context *)context;

	CAM_DBG(CAM_CUSTOM, "Enter %d", ctx->ctx_id);

	/*
	 * handle based on different irq's currently
	 * triggering only buf done if there are fences
	 */
	rc = cam_context_buf_done_from_hw(ctx, evt_data, 0);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Failed in buf done, rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_acquire_hw_in_acquired(
	struct cam_context *ctx, void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_CUSTOM, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_custom_ctx_acquire_hw_v1(ctx, args);
	else
		CAM_ERR(CAM_CUSTOM, "Unsupported api version %d",
			api_version);

	return rc;
}

/* top state machine */
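/*
 * One entry per cam_context state, indexed by CAM_CTX_*. An op left
 * NULL (or an empty ops struct) is treated by the generic cam_context
 * layer as unsupported in that state.
 */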
static struct cam_ctx_ops
	cam_custom_dev_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev =
				__cam_custom_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_custom_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev_in_acquired,
			.release_hw = __cam_custom_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_custom_ctx_link_in_acquired,
			.unlink = __cam_custom_ctx_unlink_in_acquired,
			.get_dev_info =
				__cam_custom_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_custom_ctx_start_dev_in_ready,
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev,
			.release_hw = __cam_custom_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_ready,
			.flush_req = __cam_custom_ctx_flush_req_in_ready,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Flushed */
	{},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_custom_stop_dev_in_activated,
			.release_dev =
				__cam_custom_ctx_release_dev_in_activated,
			.config_dev = __cam_custom_ctx_config_dev,
			.release_hw =
				__cam_custom_ctx_release_hw_in_activated_state,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_activated,
			.apply_req =
				__cam_custom_ctx_apply_req_in_activated_state,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
			.process_evt = __cam_custom_ctx_process_evt,
		},
		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
		.pagefault_ops = NULL,
	},
};
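
/*
 * Wire a custom HW context to its generic cam_context: cross-link the
 * per-slot request bookkeeping, initialise the base context through
 * cam_context_init() and point it at the state machine above.
 */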
int cam_custom_dev_context_init(struct cam_custom_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id)
{
	int rc = -1, i = 0;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_CUSTOM, "Invalid Context");
		return -EINVAL;
	}

	/* Custom HW context setup */
	memset(ctx, 0, sizeof(*ctx));
	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->active_req_cnt = 0;
	ctx->hw_ctx = NULL;

	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_custom[i];
		ctx->req_custom[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, custom_dev_name, CAM_CUSTOM, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_CUSTOM, "Camera Context Base init failed");
		return rc;
	}

	/* link camera context with custom HW context */
	ctx_base->state_machine = cam_custom_dev_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	return rc;
}

int cam_custom_dev_context_deinit(struct cam_custom_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	memset(ctx, 0, sizeof(*ctx));
	return 0;
}