cam_custom_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_custom_context.h"
#include "cam_common_util.h"

static const char custom_dev_name[] = "custom hw";

static int __cam_custom_ctx_handle_irq_in_activated(
	void *context, uint32_t evt_id, void *evt_data);

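/*
 * Insert a request into the pending list so the list stays sorted by
 * request_id: entries with a larger id are moved aside onto a temporary
 * list, the new request is appended, and the moved entries are re-added.
 */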
static int __cam_custom_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_CUSTOM,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	spin_unlock_bh(&ctx->lock);
	return 0;
}

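/*
 * Move requests from the given list onto a local flush list (all of them,
 * or only the one matching flush_req->req_id for a cancel), signal their
 * output fences with an error, and return the requests to the free list.
 */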
static int __cam_custom_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_custom_dev_ctx_req *req_custom;
	struct list_head flush_list;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_CUSTOM, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_ERR(CAM_CUSTOM, "no request to cancel");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	CAM_DBG(CAM_CUSTOM, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		for (i = 0; i < req_custom->num_fence_map_out; i++) {
			if (req_custom->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_CUSTOM,
					"Flush req 0x%llx, fence %d",
					req->request_id,
					req_custom->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc)
					CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
						"signal fence failed\n");
				req_custom->fence_map_out[i].sync_id = -1;
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
		!cancel_req_id_found)
		CAM_DBG(CAM_CUSTOM,
			"Flush request id:%lld is not found in the list",
			flush_req->req_id);

	return 0;
}

static int __cam_custom_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		CAM_INFO(CAM_CUSTOM, "Last request id to flush is %lld",
			flush_req->req_id);
		ctx->last_flush_req = flush_req->req_id;
	}

	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	return rc;
}

static int __cam_custom_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in pending req list, change state to acquired */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}

static int __cam_custom_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}

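/*
 * Stop the HW (if it was started) and drain the pending, wait and active
 * request lists, signalling every outstanding output fence with an error
 * before returning the requests to the free list.
 */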
static int __cam_custom_stop_dev_core(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_hw_stop_args stop;

	if (ctx_custom->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_custom->hw_ctx;
		stop.args = NULL;
		if (ctx->hw_mgr_intf->hw_stop)
			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
				&stop);
	}

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM,
			"signal fence in pending list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in wait list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in active list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}
	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;

	CAM_DBG(CAM_CUSTOM, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_custom_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);
	}
	return rc;
}

static int __cam_custom_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *)ctx->ctx_priv;

	__cam_custom_stop_dev_core(ctx, cmd);
	ctx_custom->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}

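/*
 * Release the HW resources, reset per-context bookkeeping and flush any
 * requests still sitting on the pending list before moving the context
 * back to the AVAILABLE state.
 */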
static int __cam_custom_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	rc = cam_context_release_dev_to_hw(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Unable to release device");

	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;
	ctx_custom->init_received = false;

	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_CUSTOM, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list,
		&flush_req);
	spin_unlock_bh(&ctx->lock);
	ctx->state = CAM_CTX_AVAILABLE;

	CAM_DBG(CAM_CUSTOM, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}

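/*
 * CRM apply callback: program the HW with the config of the request at
 * the head of the pending list, then move the request to the active or
 * free list depending on whether it has output fences.
 */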
static int __cam_custom_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *custom_ctx = NULL;
	struct cam_hw_config_args cfg;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;
	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip of the pending list
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Invalid Request Id asking %llu existing %llu",
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_custom->cfg;
	cfg.num_hw_update_entries = req_custom->num_cfg;
	cfg.priv = &req_custom->hw_update_data;
	cfg.init_packet = 0;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Can not apply the configuration");
	} else {
		spin_lock_bh(&ctx->lock);
		list_del_init(&req->list);
		if (!req->num_out_map_entries) {
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock_bh(&ctx->lock);
		} else {
			list_add_tail(&req->list, &ctx->active_req_list);
			spin_unlock_bh(&ctx->lock);
			/*
			 * for test purposes only - this should be
			 * triggered based on irq
			 */
			__cam_custom_ctx_handle_irq_in_activated(ctx, 0, NULL);
		}
	}

end:
	return rc;
}

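/*
 * ACQUIRE_DEV handler: validate the resource count and handle type, then
 * acquire the HW through the generic context helper.
 */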
static int __cam_custom_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc;
	struct cam_custom_context *custom_ctx;

	custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;

	if (cmd->num_resources > CAM_CUSTOM_DEV_CTX_RES_MAX) {
		CAM_ERR(CAM_CUSTOM, "Too many resources in the acquire");
		rc = -ENOMEM;
		return rc;
	}

	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_CUSTOM, "Only user pointer is supported");
		rc = -EINVAL;
		return rc;
	}

	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
	if (!rc) {
		ctx->state = CAM_CTX_ACQUIRED;
		custom_ctx->hw_ctx = ctx->ctxt_to_hw_map;
	}

	CAM_DBG(CAM_CUSTOM, "Acquire done %d", ctx->ctx_id);
	return rc;
}

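/*
 * Merge a new INIT packet into the INIT request already queued at the
 * head of the pending list (fence maps and hw update entries are copied
 * over), or queue it directly if the pending list is empty.
 */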
static int __cam_custom_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_custom_dev_ctx_req *req_custom_old;
	struct cam_custom_dev_ctx_req *req_custom_new;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_custom_old = (struct cam_custom_dev_ctx_req *) req_old->req_priv;
	req_custom_new = (struct cam_custom_dev_ctx_req *) req->req_priv;
	if (req_custom_old->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if ((req_custom_old->num_cfg + req_custom_new->num_cfg) >=
			CAM_CUSTOM_CTX_CFG_MAX) {
			CAM_WARN(CAM_CUSTOM, "Can not merge INIT pkt");
			rc = -ENOMEM;
		}

		if (req_custom_old->num_fence_map_out != 0 ||
			req_custom_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_CUSTOM, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_custom_old->fence_map_out,
				req_custom_new->fence_map_out,
				sizeof(req_custom_new->fence_map_out[0]) *
				req_custom_new->num_fence_map_out);
			req_custom_old->num_fence_map_out =
				req_custom_new->num_fence_map_out;

			memcpy(req_custom_old->fence_map_in,
				req_custom_new->fence_map_in,
				sizeof(req_custom_new->fence_map_in[0]) *
				req_custom_new->num_fence_map_in);
			req_custom_old->num_fence_map_in =
				req_custom_new->num_fence_map_in;

			memcpy(&req_custom_old->cfg[req_custom_old->num_cfg],
				req_custom_new->cfg,
				sizeof(req_custom_new->cfg[0]) *
				req_custom_new->num_cfg);
			req_custom_old->num_cfg += req_custom_new->num_cfg;

			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_CUSTOM,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}

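/*
 * Common CONFIG_DEV handler: map the user packet, have the HW manager
 * prepare the hw update entries and fence maps, take references on the
 * output fences and queue the request. INIT packets are merged into the
 * pending INIT request; UPDATE packets are added to the CRM link and
 * enqueued in request-id order.
 */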
static int __cam_custom_ctx_config_dev(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_custom_dev_ctx_req *req_custom;
	uintptr_t packet_addr;
	struct cam_packet *packet;
	size_t len = 0;
	struct cam_hw_prepare_update_args cfg;
	struct cam_req_mgr_add_request add_req;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_CUSTOM, "No more request obj free");
		return -ENOMEM;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		&packet_addr, &len);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Can not get packet address");
		rc = -EINVAL;
		goto free_req;
	}

	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
	CAM_DBG(CAM_CUSTOM, "pack_handle %llx", cmd->packet_handle);
	CAM_DBG(CAM_CUSTOM, "packet address is 0x%zx", packet_addr);
	CAM_DBG(CAM_CUSTOM, "packet with length %zu, offset 0x%llx",
		len, cmd->offset);
	CAM_DBG(CAM_CUSTOM, "Packet request id %lld",
		packet->header.request_id);
	CAM_DBG(CAM_CUSTOM, "Packet size 0x%x", packet->header.size);
	CAM_DBG(CAM_CUSTOM, "packet op %d", packet->header.op_code);

	if ((((packet->header.op_code) & 0xF) ==
		CAM_CUSTOM_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_DBG(CAM_CUSTOM,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EINVAL;
		goto free_req;
	}

	/* preprocess the configuration */
	memset(&cfg, 0, sizeof(cfg));
	cfg.packet = packet;
	cfg.ctxt_to_hw_map = ctx_custom->hw_ctx;
	cfg.out_map_entries = req_custom->fence_map_out;
	cfg.in_map_entries = req_custom->fence_map_in;
	cfg.priv = &req_custom->hw_update_data;
	cfg.pf_data = &(req->pf_data);

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_custom->num_cfg = cfg.num_hw_update_entries;
	req_custom->num_fence_map_out = cfg.num_out_map_entries;
	req_custom->num_fence_map_in = cfg.num_in_map_entries;
	req_custom->num_acked = 0;

	for (i = 0; i < req_custom->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_custom->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_CUSTOM, "Can't get ref for fence %d",
				req_custom->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_CUSTOM,
		"num_entry: %d, num fence out: %d, num fence in: %d",
		req_custom->num_cfg, req_custom->num_fence_map_out,
		req_custom->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	CAM_DBG(CAM_CUSTOM, "Packet request id %lld packet opcode:%d",
		packet->header.request_id,
		req_custom->hw_update_data.packet_opcode_type);

	if (req_custom->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_custom_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_CUSTOM, "Enqueue INIT pkt failed");
			ctx_custom->init_received = true;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received INIT pkt in wrong state");
		}
	} else {
		if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			add_req.skip_before_applying = 0;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				CAM_ERR(CAM_CUSTOM,
					"Add req failed: req id=%llu",
					req->request_id);
			} else {
				__cam_custom_ctx_enqueue_request_in_order(
					ctx, req);
			}
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received Update in wrong state");
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_CUSTOM,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_custom->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CUSTOM, "Failed to put ref of fence %d",
				req_custom->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}

static int __cam_custom_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_custom_ctx_config_dev(ctx, cmd);
	if (!rc && (ctx->link_hdl >= 0))
		ctx->state = CAM_CTX_READY;

	return rc;
}

static int __cam_custom_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_custom->subscribe_event = link->subscribe_event;

	/* change state only if we had the init config */
	if (ctx_custom->init_received)
		ctx->state = CAM_CTX_READY;

	CAM_DBG(CAM_CUSTOM, "next state %d", ctx->state);

	return 0;
}

static int __cam_custom_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;

	return 0;
}

static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_CUSTOM_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;

	return 0;
}

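/*
 * START_DEV handler: program the initial configuration from the first
 * pending request, move the context to ACTIVATED and start the HW.
 */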
static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_hw_config_args hw_config;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_CUSTOM, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	if (!ctx_custom->hw_ctx) {
		CAM_ERR(CAM_CUSTOM, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	hw_config.ctxt_to_hw_map = ctx_custom->hw_ctx;
	hw_config.request_id = req->request_id;
	hw_config.hw_update_entries = req_custom->cfg;
	hw_config.num_hw_update_entries = req_custom->num_cfg;
	hw_config.priv = &req_custom->hw_update_data;
	hw_config.init_packet = 1;

	ctx->state = CAM_CTX_ACTIVATED;
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_config);
	if (rc) {
		/* HW failure. User needs to clean up the resource */
		CAM_ERR(CAM_CUSTOM, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM, "start device success ctx %u",
		ctx->ctx_id);

	spin_lock_bh(&ctx->lock);
	list_del_init(&req->list);
	if (req_custom->num_fence_map_out)
		list_add_tail(&req->list, &ctx->active_req_list);
	else
		list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

end:
	return rc;
}

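/*
 * RELEASE_DEV in activated state: stop the HW first, then release the
 * device the same way as in the acquired state.
 */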
static int __cam_custom_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_custom_stop_dev_core(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_CUSTOM,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_custom_stop_dev_in_activated(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
		/* Handle error/bubble related issues */
		break;
	default:
		CAM_WARN(CAM_CUSTOM, "Unknown event from CRM");
		break;
	}

	return 0;
}

static int __cam_custom_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc;
	struct cam_context *ctx =
		(struct cam_context *)context;

	CAM_DBG(CAM_CUSTOM, "Enter %d", ctx->ctx_id);

	/*
	 * handle based on different irq's currently
	 * triggering only buf done if there are fences
	 */
	rc = cam_context_buf_done_from_hw(ctx, evt_data, 0);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Failed in buf done, rc=%d", rc);

	return rc;
}

/* top state machine */
static struct cam_ctx_ops
	cam_custom_dev_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev =
				__cam_custom_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev_in_acquired,
		},
		.crm_ops = {
			.link = __cam_custom_ctx_link_in_acquired,
			.unlink = __cam_custom_ctx_unlink_in_acquired,
			.get_dev_info =
				__cam_custom_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_custom_ctx_start_dev_in_ready,
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_ready,
			.flush_req = __cam_custom_ctx_flush_req_in_ready,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Flushed */
	{},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_custom_stop_dev_in_activated,
			.release_dev =
				__cam_custom_ctx_release_dev_in_activated,
			.config_dev = __cam_custom_ctx_config_dev,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_activated,
			.apply_req =
				__cam_custom_ctx_apply_req_in_activated_state,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
			.process_evt = __cam_custom_ctx_process_evt,
		},
		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
		.pagefault_ops = NULL,
	},
};

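/*
 * Initialize the custom HW context and its backing cam_context: wire up
 * the per-request private data and register the state machine above.
 */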
int cam_custom_dev_context_init(struct cam_custom_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id)
{
	int rc = -1, i = 0;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_CUSTOM, "Invalid Context");
		return -EINVAL;
	}

	/* Custom HW context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->active_req_cnt = 0;
	ctx->hw_ctx = NULL;

	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_custom[i];
		ctx->req_custom[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, custom_dev_name, CAM_CUSTOM, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_CUSTOM, "Camera Context Base init failed");
		return rc;
	}

	/* link camera context with custom HW context */
	ctx_base->state_machine = cam_custom_dev_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	return rc;
}

int cam_custom_dev_context_deinit(struct cam_custom_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	memset(ctx, 0, sizeof(*ctx));
	return 0;
}