cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"
#include "cam_common_util.h"
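
/*
 * Find the first free slot in the sync table bitmap and claim it.
 * The table_lock mutex serializes concurrent allocations. Returns 0
 * on success with *idx set to the claimed slot, or -1 if all
 * CAM_SYNC_MAX_OBJS slots are in use.
 */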
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
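
/*
 * Initialize the sync table row at @idx as an active sync object of
 * @type. Index 0 is treated as invalid, so @idx must lie in
 * (0, CAM_SYNC_MAX_OBJS). The row is zeroed before its name, lists,
 * completion, and state are set up.
 */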
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	/* Compute the row pointer only after table and idx are validated */
	row = table + idx;
	memset(row, 0, sizeof(*row));

	strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}
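
/*
 * Build a merged (group) sync object at @idx from @num_objs child sync
 * objects. Each still-active child is linked both ways: a child entry
 * on the group row and a parent entry on the child row, with the
 * child's row spinlock held while its links are updated (hence the
 * GFP_ATOMIC allocations). A child that already signaled ERROR or
 * CANCEL propagates its state to the group instead of being linked.
 */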
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	rc = cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
	if (rc)
		return rc;

	/*
	 * While traversing the children, the parent's row is updated with
	 * child info and each child's row is updated with parent info.
	 * A child that has already signaled (SUCCESS, ERROR, or CANCEL) is
	 * not added to either list; ERROR and CANCEL are propagated to the
	 * group's state.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}
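
/*
 * Tear down the sync object at @idx: unlink it from every child and
 * parent, free any pending callback and user-payload entries, drop the
 * ref taken on an imported dma fence, and return the slot to the
 * bitmap. Child and parent links are first moved onto temporary lists
 * under the row's lock, then unlinked one row at a time so that only
 * one row spinlock is ever held at once.
 */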
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
	struct cam_sync_check_for_dma_release *check_for_dma_release)
{
	struct sync_table_row *row;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	/* Compute the row pointer only after table and idx are validated */
	row = table + idx;

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx, row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	row->state = CAM_SYNC_STATE_INVALID;

	/* The object's child and parent links are moved onto these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child-to-parent links from the child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent-to-child links */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	/* Decrement ref cnt for imported dma fence */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		cam_dma_fence_get_put_ref(false,
			row->dma_fence_info.dma_fence_row_idx);

		/* Check if the same dma fence is being released with the sync obj */
		if (check_for_dma_release) {
			if (row->dma_fence_info.dma_fence_fd ==
				check_for_dma_release->dma_fence_fd) {
				check_for_dma_release->sync_created_with_dma =
					row->dma_fence_info.sync_created_with_dma;
				check_for_dma_release->dma_fence_row_idx =
					row->dma_fence_info.dma_fence_row_idx;
			}
		}
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
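
/*
 * Workqueue handler for a registered kernel callback. Flags the case
 * where the work item ran later than the expected scheduling threshold,
 * invokes the callback with the sync object's final status, and frees
 * the callback node allocated at registration time.
 */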
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);
	sync_callback sync_data = cb_info->callback_func;

	cam_common_util_thread_switch_delay_detect(
		"CAM-SYNC workq schedule",
		cb_info->workq_scheduled_ts,
		CAM_WORKQ_SCHEDULE_TIME_THRESHOLD);

	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);

	kfree(cb_info);
}
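
/*
 * Fan out the signal for @sync_obj: queue every registered kernel
 * callback onto the sync workqueue, send a V4L2 event for every
 * registered user payload, and complete the row to unblock waiters.
 * The row's lists are walked without taking the row spinlock here, so
 * the caller is relied upon to serialize access to the row.
 */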
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]",
			signalable_row->name, sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);

		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because sending the V4L
		 * event makes a deep copy anyway.
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone who
	 * might be blocked and waiting on this sync object.
	 */
	complete_all(&signalable_row->signaled);
}
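
/*
 * Package a sync event into a struct v4l2_event and queue it on the
 * sync video device. The header layout depends on the negotiated event
 * version: V2 additionally carries the version and an event-cause code
 * alongside the sync object and status.
 */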
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);

	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version,
		sync_obj);
}
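
/*
 * Propagate a child's terminal state to a parent (group) row. An
 * ACTIVE or SUCCESS parent takes the new state; a parent already in
 * ERROR or CANCEL keeps its first failure state. Returns -EINVAL for
 * an INVALID or unrecognized parent state, 0 otherwise.
 */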
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
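
/*
 * Free entries on @row's children list. With SYNC_LIST_CLEAN_ONE only
 * the entry matching @sync_obj is removed; with SYNC_LIST_CLEAN_ALL
 * every entry is removed and @sync_obj is ignored. Callers serialize
 * access to @row via its row spinlock.
 */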
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
		temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}
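
/*
 * Counterpart of cam_sync_util_cleanup_children_list() for @row's
 * parents list: frees either the single entry matching @sync_obj
 * (SYNC_LIST_CLEAN_ONE) or every entry (SYNC_LIST_CLEAN_ALL).
 */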
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
		temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}