cam_custom_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_custom_context.h"
#include "cam_common_util.h"

static const char custom_dev_name[] = "cam-custom";

static int __cam_custom_ctx_handle_irq_in_activated(
	void *context, uint32_t evt_id, void *evt_data);

static int __cam_custom_ctx_start_dev_in_ready(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *cmd);

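/*
 * Insert @req into ctx->pending_req_list while keeping the list sorted by
 * ascending request_id: entries with a larger id are parked on a temporary
 * list, the new request is appended, and the parked entries are re-added
 * behind it. A duplicate request_id is only warned about; the request is
 * still enqueued.
 */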
static int __cam_custom_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_CUSTOM,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	spin_unlock_bh(&ctx->lock);
	return 0;
}

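/*
 * Move requests from @req_list onto a private flush list, signal each of
 * their remaining output fences with CAM_SYNC_STATE_SIGNALED_CANCEL, and
 * recycle the requests to ctx->free_req_list. For CANCEL_REQ type only the
 * request matching flush_req->req_id is flushed; any other flush type
 * drains the whole list. Callers serialize this helper via ctx->lock.
 */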
static int __cam_custom_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_custom_dev_ctx_req *req_custom;
	struct list_head flush_list;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_CUSTOM, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_ERR(CAM_CUSTOM, "no request to cancel");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	CAM_DBG(CAM_CUSTOM, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		for (i = 0; i < req_custom->num_fence_map_out; i++) {
			if (req_custom->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_CUSTOM,
					"Flush req 0x%llx, fence %d",
					req->request_id,
					req_custom->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL);
				if (rc)
					CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
						"signal fence failed\n");
				req_custom->fence_map_out[i].sync_id = -1;
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
		!cancel_req_id_found)
		CAM_DBG(CAM_CUSTOM,
			"Flush request id:%lld is not found in the list",
			flush_req->req_id);

	return 0;
}

static int __cam_custom_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;

	return 0;
}

static int __cam_custom_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}

static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_CUSTOM_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;

	return 0;
}

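/*
 * Flush handler shared by the acquired and activated states. The pending
 * list is always flushed; for CAM_REQ_MGR_FLUSH_TYPE_ALL on an activated
 * context the HW is additionally stopped (stop_only = true), the wait and
 * active lists are drained, and the HW is reset so the context can be
 * restarted from the FLUSHED state by a subsequent init/config.
 */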
static int __cam_custom_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;
	struct cam_custom_context *custom_ctx;
	struct cam_hw_reset_args reset_args;
	struct cam_hw_stop_args stop_args;
	struct cam_custom_stop_args custom_stop;

	custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;

	CAM_DBG(CAM_CUSTOM, "Flushing pending list");
	spin_lock_bh(&ctx->lock);
	__cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		if (ctx->state <= CAM_CTX_READY) {
			ctx->state = CAM_CTX_ACQUIRED;
			goto end;
		}

		spin_lock_bh(&ctx->lock);
		ctx->state = CAM_CTX_FLUSHED;
		spin_unlock_bh(&ctx->lock);

		CAM_INFO(CAM_CUSTOM, "Last request id to flush is %lld",
			flush_req->req_id);
		ctx->last_flush_req = flush_req->req_id;

		/* stop hw first */
		if (ctx->hw_mgr_intf->hw_stop) {
			custom_stop.stop_only = true;
			stop_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
			stop_args.args = (void *) &custom_stop;
			rc = ctx->hw_mgr_intf->hw_stop(
				ctx->hw_mgr_intf->hw_mgr_priv, &stop_args);
			if (rc)
				CAM_ERR(CAM_CUSTOM,
					"HW stop failed in flush rc %d", rc);
		}

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->wait_req_list))
			__cam_custom_ctx_flush_req(ctx, &ctx->wait_req_list,
				flush_req);

		if (!list_empty(&ctx->active_req_list))
			__cam_custom_ctx_flush_req(ctx, &ctx->active_req_list,
				flush_req);

		custom_ctx->active_req_cnt = 0;
		spin_unlock_bh(&ctx->lock);

		reset_args.ctxt_to_hw_map = custom_ctx->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
			&reset_args);
		if (rc)
			CAM_ERR(CAM_CUSTOM,
				"Reset HW failed in flush rc %d", rc);

		custom_ctx->init_received = false;
	}

end:
	return rc;
}

static int __cam_custom_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in pending req list, change state to acquire */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}

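/*
 * Core stop path shared by stop/release/unlink: stop the HW unless the
 * context was already flushed, then drain the pending, wait and active
 * request lists, cancelling any outstanding output fences and recycling
 * the requests to the free list. When invoked without a stop command
 * (internal callers pass NULL), the context is also unlinked.
 */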
static int __cam_custom_stop_dev_core(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_hw_stop_args stop;
	struct cam_custom_stop_args custom_stop;

	if ((ctx->state != CAM_CTX_FLUSHED) && (ctx_custom->hw_ctx) &&
		(ctx->hw_mgr_intf->hw_stop)) {
		custom_stop.stop_only = false;
		stop.ctxt_to_hw_map = ctx_custom->hw_ctx;
		stop.args = (void *) &custom_stop;
		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop);
		if (rc)
			CAM_ERR(CAM_CUSTOM, "HW stop failed rc %d", rc);
	}

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM,
			"signal fence in pending list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in wait list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
		CAM_DBG(CAM_CUSTOM, "signal fence in active list. fence num %d",
			req_custom->num_fence_map_out);
		for (i = 0; i < req_custom->num_fence_map_out; i++)
			if (req_custom->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_custom->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;

	CAM_DBG(CAM_CUSTOM, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_custom_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);
	}

	return rc;
}

static int __cam_custom_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *)ctx->ctx_priv;

	__cam_custom_stop_dev_core(ctx, cmd);
	ctx_custom->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;

	return 0;
}

static int __cam_custom_ctx_release_hw_in_top_state(
	struct cam_context *ctx, void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_req_mgr_flush_request flush_req;
	struct cam_custom_context *custom_ctx =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (custom_ctx->hw_ctx) {
		rel_arg.ctxt_to_hw_map = custom_ctx->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		custom_ctx->hw_ctx = NULL;
		if (rc)
			CAM_ERR(CAM_CUSTOM,
				"Failed to release HW for ctx:%u", ctx->ctx_id);
	} else {
		CAM_ERR(CAM_CUSTOM, "No HW resources acquired for this ctx");
	}

	ctx->last_flush_req = 0;
	custom_ctx->frame_id = 0;
	custom_ctx->active_req_cnt = 0;
	custom_ctx->hw_acquired = false;
	custom_ctx->init_received = false;

	/* check for active requests as well */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list,
		&flush_req);
	spin_unlock_bh(&ctx->lock);

	ctx->state = CAM_CTX_ACQUIRED;
	CAM_DBG(CAM_CUSTOM, "Release HW success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}

static int __cam_custom_ctx_release_hw_in_activated_state(
	struct cam_context *ctx, void *cmd)
{
	int rc = 0;

	rc = __cam_custom_stop_dev_in_activated(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Release hw failed rc=%d", rc);

	return rc;
}

static int __cam_custom_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_custom->hw_ctx) {
		CAM_ERR(CAM_CUSTOM, "releasing hw");
		__cam_custom_ctx_release_hw_in_top_state(ctx, NULL);
	}

	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_custom->frame_id = 0;
	ctx_custom->active_req_cnt = 0;
	ctx_custom->hw_acquired = false;
	ctx_custom->init_received = false;

	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_CUSTOM, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;

	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list,
		&flush_req);
	spin_unlock_bh(&ctx->lock);

	ctx->state = CAM_CTX_AVAILABLE;
	CAM_DBG(CAM_CUSTOM, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}

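/*
 * CRM apply callback for the activated state. The request at the head of
 * the pending list must match apply->request_id exactly; it is then handed
 * to the HW manager through hw_config(). On success the request moves to
 * the active list, or straight to the free list when it has no output
 * fences to wait on.
 */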
static int __cam_custom_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *custom_ctx = NULL;
	struct cam_hw_config_args cfg;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;
	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip of the pending list
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Invalid Request Id asking %llu existing %llu",
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_custom->cfg;
	cfg.num_hw_update_entries = req_custom->num_cfg;
	cfg.priv = &req_custom->hw_update_data;
	cfg.init_packet = 0;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_CUSTOM,
			"Can not apply the configuration");
	} else {
		spin_lock_bh(&ctx->lock);
		list_del_init(&req->list);
		if (!req->num_out_map_entries) {
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock_bh(&ctx->lock);
		} else {
			list_add_tail(&req->list, &ctx->active_req_list);
			spin_unlock_bh(&ctx->lock);
			/*
			 * for test purposes only - this should be
			 * triggered based on irq
			 */
			__cam_custom_ctx_handle_irq_in_activated(ctx, 0, NULL);
		}
	}

end:
	return rc;
}

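/*
 * Version-1 acquire_hw handler: copy the cam_custom_acquire_hw_info blob
 * from user space (the handle must be a user pointer) and pass it to the
 * HW manager to reserve resources. On success the resulting HW context is
 * cached in both the custom context and the base context.
 */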
static int __cam_custom_ctx_acquire_hw_v1(
	struct cam_context *ctx, void *args)
{
	int rc = 0;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;
	struct cam_custom_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CUSTOM, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_CUSTOM, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_CUSTOM, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Acquire HW failed");
		goto free_res;
	}

	ctx_custom->hw_ctx = param.ctxt_to_hw_map;
	ctx_custom->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	CAM_DBG(CAM_CUSTOM,
		"Acquire HW success on session_hdl 0x%x for ctx_id %u",
		ctx->session_hdl, ctx->ctx_id);

	kfree(acquire_hw_info);
	return rc;

free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}

static int __cam_custom_ctx_acquire_dev_in_available(
	struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_create_dev_hdl req_hdl_param;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CUSTOM, "HW interface is not ready");
		rc = -EFAULT;
		return rc;
	}

	CAM_DBG(CAM_CUSTOM,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	if (cmd->num_resources != CAM_API_COMPAT_CONSTANT) {
		CAM_ERR(CAM_CUSTOM, "Invalid num_resources 0x%x",
			cmd->num_resources);
		return -EINVAL;
	}

	req_hdl_param.session_hdl = cmd->session_handle;
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;

	CAM_DBG(CAM_CUSTOM, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_CUSTOM, "Can not create device handle");
		return rc;
	}

	cmd->dev_handle = ctx->dev_hdl;
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	CAM_DBG(CAM_CUSTOM,
		"Acquire dev success on session_hdl 0x%x for ctx %u",
		cmd->session_handle, ctx->ctx_id);

	return rc;
}

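/*
 * INIT packets are merged rather than queued: when the head of the pending
 * list is itself an INIT request, the new request's config entries and
 * fence maps are folded into it (bounded by CAM_CUSTOM_CTX_CFG_MAX) and
 * the new request object is recycled to the free list. If a non-INIT
 * request is already pending, the sequence is invalid and rejected.
 */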
static int __cam_custom_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_custom_dev_ctx_req *req_custom_old;
	struct cam_custom_dev_ctx_req *req_custom_new;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_custom_old = (struct cam_custom_dev_ctx_req *) req_old->req_priv;
	req_custom_new = (struct cam_custom_dev_ctx_req *) req->req_priv;
	if (req_custom_old->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if ((req_custom_old->num_cfg + req_custom_new->num_cfg) >=
			CAM_CUSTOM_CTX_CFG_MAX) {
			CAM_WARN(CAM_CUSTOM, "Can not merge INIT pkt");
			rc = -ENOMEM;
		}

		if (req_custom_old->num_fence_map_out != 0 ||
			req_custom_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_CUSTOM, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_custom_old->fence_map_out,
				req_custom_new->fence_map_out,
				sizeof(req_custom_new->fence_map_out[0]) *
				req_custom_new->num_fence_map_out);
			req_custom_old->num_fence_map_out =
				req_custom_new->num_fence_map_out;

			memcpy(req_custom_old->fence_map_in,
				req_custom_new->fence_map_in,
				sizeof(req_custom_new->fence_map_in[0]) *
				req_custom_new->num_fence_map_in);
			req_custom_old->num_fence_map_in =
				req_custom_new->num_fence_map_in;

			memcpy(&req_custom_old->cfg[req_custom_old->num_cfg],
				req_custom_new->cfg,
				sizeof(req_custom_new->cfg[0]) *
				req_custom_new->num_cfg);
			req_custom_old->num_cfg += req_custom_new->num_cfg;

			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_CUSTOM,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}

end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}

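/*
 * Common config_dev path: pull a free request object, map the user packet
 * from its mem-mgr handle, and let the HW manager prepare the update
 * entries and fence maps. INIT packets are enqueued (merged) locally;
 * update packets are first registered with the CRM via add_req and then
 * inserted into the pending list in request-id order.
 */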
static int __cam_custom_ctx_config_dev(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_custom_dev_ctx_req *req_custom;
	uintptr_t packet_addr;
	struct cam_packet *packet;
	size_t len = 0;
	struct cam_hw_prepare_update_args cfg;
	struct cam_req_mgr_add_request add_req;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_CUSTOM, "No more request obj free");
		return -ENOMEM;
	}

	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		&packet_addr, &len);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Can not get packet address");
		rc = -EINVAL;
		goto free_req;
	}

	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
	CAM_DBG(CAM_CUSTOM, "pack_handle %llx", cmd->packet_handle);
	CAM_DBG(CAM_CUSTOM, "packet address is 0x%zx", packet_addr);
	CAM_DBG(CAM_CUSTOM, "packet with length %zu, offset 0x%llx",
		len, cmd->offset);
	CAM_DBG(CAM_CUSTOM, "Packet request id %lld",
		packet->header.request_id);
	CAM_DBG(CAM_CUSTOM, "Packet size 0x%x", packet->header.size);
	CAM_DBG(CAM_CUSTOM, "packet op %d", packet->header.op_code);

	if ((((packet->header.op_code) & 0xF) ==
		CAM_CUSTOM_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_DBG(CAM_CUSTOM,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EINVAL;
		goto free_req;
	}

	/* preprocess the configuration */
	memset(&cfg, 0, sizeof(cfg));
	cfg.packet = packet;
	cfg.ctxt_to_hw_map = ctx_custom->hw_ctx;
	cfg.out_map_entries = req_custom->fence_map_out;
	cfg.in_map_entries = req_custom->fence_map_in;
	cfg.priv = &req_custom->hw_update_data;
	cfg.pf_data = &(req->pf_data);

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_CUSTOM, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_custom->num_cfg = cfg.num_hw_update_entries;
	req_custom->num_fence_map_out = cfg.num_out_map_entries;
	req_custom->num_fence_map_in = cfg.num_in_map_entries;
	req_custom->num_acked = 0;

	for (i = 0; i < req_custom->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_custom->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_CUSTOM, "Can't get ref for fence %d",
				req_custom->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_CUSTOM,
		"num_entry: %d, num fence out: %d, num fence in: %d",
		req_custom->num_cfg, req_custom->num_fence_map_out,
		req_custom->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	CAM_DBG(CAM_CUSTOM, "Packet request id %lld packet opcode:%d",
		packet->header.request_id,
		req_custom->hw_update_data.packet_opcode_type);

	if (req_custom->hw_update_data.packet_opcode_type ==
		CAM_CUSTOM_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_custom_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_CUSTOM, "Enqueue INIT pkt failed");
			ctx_custom->init_received = true;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received INIT pkt in wrong state");
		}
	} else {
		if ((ctx->state != CAM_CTX_FLUSHED) &&
			(ctx->state >= CAM_CTX_READY) &&
			(ctx->ctx_crm_intf->add_req)) {
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			add_req.skip_before_applying = 0;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				CAM_ERR(CAM_CUSTOM,
					"Add req failed: req id=%llu",
					req->request_id);
			} else {
				__cam_custom_ctx_enqueue_request_in_order(
					ctx, req);
			}
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_CUSTOM, "Received Update in wrong state");
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_CUSTOM,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_custom->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CUSTOM, "Failed to put ref of fence %d",
				req_custom->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}

static int __cam_custom_ctx_config_dev_in_flushed(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_start_stop_dev_cmd start_cmd;
	struct cam_custom_context *custom_ctx =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (!custom_ctx->hw_acquired) {
		CAM_ERR(CAM_CUSTOM, "HW is not acquired, reject packet");
		rc = -EINVAL;
		goto end;
	}

	rc = __cam_custom_ctx_config_dev(ctx, cmd);
	if (rc)
		goto end;

	if (!custom_ctx->init_received) {
		CAM_WARN(CAM_CUSTOM,
			"Received update packet in flushed state, skip start");
		goto end;
	}

	start_cmd.dev_handle = cmd->dev_handle;
	start_cmd.session_handle = cmd->session_handle;
	rc = __cam_custom_ctx_start_dev_in_ready(ctx, &start_cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM,
			"Failed to re-start HW after flush rc: %d", rc);
	else
		CAM_INFO(CAM_CUSTOM,
			"Received init after flush. Re-start HW complete.");

end:
	return rc;
}

static int __cam_custom_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (!ctx_custom->hw_acquired) {
		CAM_ERR(CAM_CUSTOM, "HW not acquired, reject config packet");
		return -EAGAIN;
	}

	rc = __cam_custom_ctx_config_dev(ctx, cmd);
	if (!rc && (ctx->link_hdl >= 0))
		ctx->state = CAM_CTX_READY;

	return rc;
}

static int __cam_custom_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_custom->subscribe_event = link->subscribe_event;

	/* change state only if we had the init config */
	if (ctx_custom->init_received)
		ctx->state = CAM_CTX_READY;

	CAM_DBG(CAM_CUSTOM, "next state %d", ctx->state);
	return 0;
}

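/*
 * Start the device using the INIT request at the head of the pending
 * list. start_only is set when restarting from the FLUSHED state so the
 * HW manager can skip full (re)initialization. The context transitions to
 * ACTIVATED before hw_start() and falls back to READY on failure.
 */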
static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_custom_start_args custom_start;
	struct cam_ctx_request *req;
	struct cam_custom_dev_ctx_req *req_custom;
	struct cam_custom_context *ctx_custom =
		(struct cam_custom_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_CUSTOM, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;

	if (!ctx_custom->hw_ctx) {
		CAM_ERR(CAM_CUSTOM, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	custom_start.hw_config.ctxt_to_hw_map = ctx_custom->hw_ctx;
	custom_start.hw_config.request_id = req->request_id;
	custom_start.hw_config.hw_update_entries = req_custom->cfg;
	custom_start.hw_config.num_hw_update_entries = req_custom->num_cfg;
	custom_start.hw_config.priv = &req_custom->hw_update_data;
	custom_start.hw_config.init_packet = 1;

	if (ctx->state == CAM_CTX_FLUSHED)
		custom_start.start_only = true;
	else
		custom_start.start_only = false;

	ctx->state = CAM_CTX_ACTIVATED;
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&custom_start);
	if (rc) {
		/* HW failure. User needs to clean up the resource */
		CAM_ERR(CAM_CUSTOM, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	CAM_DBG(CAM_CUSTOM, "start device success ctx %u",
		ctx->ctx_id);

	spin_lock_bh(&ctx->lock);
	list_del_init(&req->list);
	if (req_custom->num_fence_map_out)
		list_add_tail(&req->list, &ctx->active_req_list);
	else
		list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

end:
	return rc;
}

static int __cam_custom_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_custom_stop_dev_core(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_CUSTOM,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_custom_stop_dev_in_activated(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_CUSTOM, "Stop device failed rc=%d", rc);

	rc = __cam_custom_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
		/* Handle error/bubble related issues */
		break;
	default:
		CAM_WARN(CAM_CUSTOM, "Unknown event from CRM");
		break;
	}

	return 0;
}

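/*
 * IRQ dispatch for the activated state. Only buf-done is handled for now
 * (the event id is ignored, as the in-line comment below notes); the
 * payload is forwarded to the generic cam_context buf-done helper.
 */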
static int __cam_custom_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc;
	struct cam_context *ctx =
		(struct cam_context *)context;

	CAM_DBG(CAM_CUSTOM, "Enter %d", ctx->ctx_id);

	/*
	 * handle based on different irq's currently
	 * triggering only buf done if there are fences
	 */
	rc = cam_context_buf_done_from_hw(ctx, evt_data, 0);
	if (rc)
		CAM_ERR(CAM_CUSTOM, "Failed in buf done, rc=%d", rc);

	return rc;
}

static int __cam_custom_ctx_acquire_hw_in_acquired(
	struct cam_context *ctx, void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_CUSTOM, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_custom_ctx_acquire_hw_v1(ctx, args);
	else
		CAM_ERR(CAM_CUSTOM, "Unsupported api version %d",
			api_version);

	return rc;
}

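/*
 * Per-state ops table indexed by cam_context state (UNINIT, AVAILABLE,
 * ACQUIRED, READY, FLUSHED, ACTIVATED). The cam_context core dispatches
 * ioctls, CRM callbacks and IRQs through this table, so an op left NULL
 * is effectively unsupported in that state.
 */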
/* top state machine */
static struct cam_ctx_ops
	cam_custom_dev_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev =
				__cam_custom_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_custom_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev_in_acquired,
			.release_hw = __cam_custom_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_custom_ctx_link_in_acquired,
			.unlink = __cam_custom_ctx_unlink_in_acquired,
			.get_dev_info =
				__cam_custom_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_custom_ctx_start_dev_in_ready,
			.release_dev = __cam_custom_release_dev_in_acquired,
			.config_dev = __cam_custom_ctx_config_dev,
			.release_hw = __cam_custom_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_ready,
			.flush_req = __cam_custom_ctx_flush_req_in_ready,
		},
		.irq_ops = NULL,
		.pagefault_ops = NULL,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_custom_stop_dev_in_activated,
			.release_dev =
				__cam_custom_ctx_release_dev_in_activated,
			.config_dev = __cam_custom_ctx_config_dev_in_flushed,
			.release_hw =
				__cam_custom_ctx_release_hw_in_activated_state,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_ready,
		},
		.irq_ops = NULL,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_custom_stop_dev_in_activated,
			.release_dev =
				__cam_custom_ctx_release_dev_in_activated,
			.config_dev = __cam_custom_ctx_config_dev,
			.release_hw =
				__cam_custom_ctx_release_hw_in_activated_state,
		},
		.crm_ops = {
			.unlink = __cam_custom_ctx_unlink_in_activated,
			.apply_req =
				__cam_custom_ctx_apply_req_in_activated_state,
			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
			.process_evt = __cam_custom_ctx_process_evt,
		},
		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
		.pagefault_ops = NULL,
	},
};

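/*
 * Wire a custom HW context to its cam_context base: the private request
 * structs are cross-linked with the base request array, the base context
 * is initialized through cam_context_init(), and the state machine above
 * is installed. A device node would typically call this once per context
 * at probe time, along the lines of (illustrative only; the surrounding
 * node structures are assumed, not defined in this file):
 *
 *	rc = cam_custom_dev_context_init(&custom_ctx[i], &base_ctx[i],
 *		&node->crm_node_intf, &node->hw_mgr_intf, i);
 */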
int cam_custom_dev_context_init(struct cam_custom_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id)
{
	int rc = -1, i = 0;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_CUSTOM, "Invalid Context");
		return -EINVAL;
	}

	/* Custom HW context setup */
	memset(ctx, 0, sizeof(*ctx));
	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->active_req_cnt = 0;
	ctx->hw_ctx = NULL;

	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_custom[i];
		ctx->req_custom[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, custom_dev_name, CAM_CUSTOM, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_CUSTOM, "Camera Context Base init failed");
		return rc;
	}

	/* link camera context with custom HW context */
	ctx_base->state_machine = cam_custom_dev_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	return rc;
}

int cam_custom_dev_context_deinit(struct cam_custom_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	memset(ctx, 0, sizeof(*ctx));
	return 0;
}