cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"
#include "cam_common_util.h"
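
/*
 * Find the first unused slot in the sync table bitmap and claim it
 * under table_lock. Returns 0 with *idx set on success, or -1 if the
 * table is full (note: -1, not a -errno value).
 */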
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
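
/*
 * Initialize the sync table row at @idx to a fresh ACTIVE state with
 * empty parent/child/callback/payload lists. Returns -EINVAL for a
 * NULL table or an out-of-range index.
 */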
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row;

	/* Validate inputs before computing the row pointer */
	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	row = table + idx;
	memset(row, 0, sizeof(*row));
	strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}
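
/*
 * Initialize row @idx as a GROUP (merged) fence over @sync_objs. Each
 * ACTIVE child is cross-linked with the group (child info on the group
 * row, parent info on the child row) and counted in row->remaining;
 * children already signaled with ERROR/CANCEL propagate their state to
 * the group instead of being linked. On failure, the links created so
 * far are unwound. Allocations use GFP_ATOMIC because the child's row
 * spinlock is held.
 */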
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	/* Propagate validation failure from row init */
	rc = cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
	if (rc)
		return rc;

	/*
	 * While traversing for children, the parent's row list is updated with
	 * child info and each child's row is updated with parent info.
	 * If any child state is ERROR or SUCCESS, it will not be added to the
	 * list.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);

	return rc;
}
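
/*
 * Tear down the sync object at @idx: mark it INVALID, sever its links
 * to parents and children (moving them to temporary lists first so the
 * related rows can be locked without nesting row spinlocks), free any
 * pending callbacks and user payloads, drop the reference on an
 * imported dma fence, and return the slot to the bitmap.
 * @check_for_dma_release and @check_for_synx_release are filled in so
 * the caller can tell whether an associated external fence is being
 * released along with this sync object.
 */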
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
	struct cam_sync_check_for_dma_release *check_for_dma_release,
	struct cam_sync_check_for_synx_release *check_for_synx_release)
{
	struct sync_table_row *row;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	/* Validate inputs before computing the row pointer */
	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	row = table + idx;
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx, row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child to parent link from child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent to child link */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	/* Decrement ref cnt for imported dma fence */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		cam_dma_fence_get_put_ref(false,
			row->dma_fence_info.dma_fence_row_idx);

		/* Check if the same dma fence is being released with the sync obj */
		if (check_for_dma_release) {
			if (row->dma_fence_info.dma_fence_fd ==
				check_for_dma_release->dma_fence_fd) {
				check_for_dma_release->sync_created_with_dma =
					row->dma_fence_info.sync_created_with_dma;
				check_for_dma_release->dma_fence_row_idx =
					row->dma_fence_info.dma_fence_row_idx;
			}
		}
	}

	/* Check if the same synx obj is being released with the sync obj */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
		if (check_for_synx_release) {
			if (row->synx_obj_info.synx_obj ==
				check_for_synx_release->synx_obj) {
				check_for_synx_release->synx_obj_row_idx =
					row->synx_obj_info.synx_obj_row_idx;
				check_for_synx_release->sync_created_with_synx =
					row->synx_obj_info.sync_created_with_synx;
			}
		}
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
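
/*
 * Workqueue handler: flag a late schedule if the work sat queued past
 * the threshold, invoke the registered kernel callback with the
 * signaled status, then free the callback node allocated at
 * registration time.
 */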
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);
	sync_callback sync_data = cb_info->callback_func;

	cam_common_util_thread_switch_delay_detect(
		"CAM-SYNC workq schedule",
		cb_info->workq_scheduled_ts,
		CAM_WORKQ_SCHEDULE_TIME_THRESHOLD);

	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);

	kfree(cb_info);
}
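
/*
 * Fan out a signal on @sync_obj: queue every registered kernel
 * callback onto the sync workqueue, send a V4L2 event for every
 * registered user payload, then wake all waiters blocked on the row's
 * completion.
 */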
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]",
			signalable_row->name, sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because
		 * sending the V4L event makes a deep copy
		 * anyway.
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object.
	 */
	complete_all(&signalable_row->signaled);
}
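
/*
 * Build a v4l2_event with a V1 or V2 header depending on the version
 * negotiated with userspace, copy @len bytes of @payload into it, and
 * queue it on the sync video device for userspace to read.
 */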
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);
	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version,
		sync_obj);
}
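
/*
 * Propagate a child's signaled state to its parent (group) row. ERROR
 * and CANCEL are sticky: once the parent holds either, a later
 * SUCCESS from another child does not overwrite it. Returns -EINVAL
 * if the parent row is in an invalid or unknown state.
 */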
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
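
/*
 * Remove entries from @row's children list and free them: all entries
 * for SYNC_LIST_CLEAN_ALL, or only the entry matching @sync_obj for
 * SYNC_LIST_CLEAN_ONE (stopping once it is found).
 */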
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
		temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}
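
/*
 * Remove entries from @row's parents list and free them: all entries
 * for SYNC_LIST_CLEAN_ALL, or only the entry matching @sync_obj for
 * SYNC_LIST_CLEAN_ONE (stopping once it is found).
 */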
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
		temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}