// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"
#include "cam_common_util.h"

extern unsigned long cam_sync_monitor_mask;
  10. static int cam_generic_expand_monitor_table(int idx, struct mutex *lock,
  11. struct cam_generic_fence_monitor_data **mon_data)
  12. {
  13. struct cam_generic_fence_monitor_data *row_mon_data;
  14. mutex_lock(lock);
  15. row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
  16. if (!row_mon_data) {
  17. row_mon_data = kzalloc(
  18. sizeof(struct cam_generic_fence_monitor_data) *
  19. CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, GFP_KERNEL);
  20. mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)] = row_mon_data;
  21. }
  22. if (!row_mon_data) {
  23. CAM_ERR(CAM_SYNC, "Error allocating memory %d, idx %d",
  24. sizeof(struct cam_generic_fence_monitor_data) *
  25. CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, idx);
  26. mutex_unlock(lock);
  27. return -ENOMEM;
  28. }
  29. mutex_unlock(lock);
  30. return 0;
  31. }
  32. static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_monitor_entries(int idx)
  33. {
  34. struct cam_generic_fence_monitor_data *mon_data;
  35. mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
  36. if (mon_data->swap_monitor_entries)
  37. return mon_data->prev_monitor_entries;
  38. else
  39. return mon_data->monitor_entries;
  40. }
  41. static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_prev_monitor_entries(int idx)
  42. {
  43. struct cam_generic_fence_monitor_data *mon_data;
  44. mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
  45. if (mon_data->swap_monitor_entries)
  46. return mon_data->monitor_entries;
  47. else
  48. return mon_data->prev_monitor_entries;
  49. }
  50. const char *cam_fence_op_to_string(
  51. enum cam_fence_op op)
  52. {
  53. switch (op) {
  54. case CAM_FENCE_OP_CREATE:
  55. return "CREATE";
  56. case CAM_FENCE_OP_REGISTER_CB:
  57. return "REGISTER_CB";
  58. case CAM_FENCE_OP_SIGNAL:
  59. return "SIGNAL";
  60. case CAM_FENCE_OP_UNREGISTER_ON_SIGNAL:
  61. return "UNREGISTER_ON_SIGNAL";
  62. case CAM_FENCE_OP_UNREGISTER_CB:
  63. return "UNREGISTER_CB";
  64. case CAM_FENCE_OP_SKIP_REGISTER_CB:
  65. return "SKIP_REGISTER_CB";
  66. case CAM_FENCE_OP_ALREADY_REGISTERED_CB:
  67. return "ALREADY_REGISTERED_CB";
  68. case CAM_FENCE_OP_DESTROY:
  69. return "DESTROY";
  70. default:
  71. return "INVALID";
  72. }
  73. }
/*
 * Snapshot @row's identity and monitor head into the "prev_*" fields of
 * its monitor data, then flip swap_monitor_entries so the other entry
 * table becomes current. Called when the row is destroyed so the next
 * user of this slot can still dump the previous user's history.
 *
 * No-op when fence monitoring was never enabled (mon_data unallocated).
 * NOTE(review): assumes the caller serializes against concurrent monitor
 * updates for this row — confirm against call sites.
 */
static void __cam_sync_save_previous_monitor_data(
	struct sync_table_row *row)
{
	struct cam_generic_fence_monitor_data *row_mon_data;

	if (!sync_dev->mon_data)
		return;

	row_mon_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);

	/* save current usage details into prev variables */
	strscpy(row_mon_data->prev_name, row->name, SYNC_DEBUG_NAME_LEN);
	row_mon_data->prev_type = row->type;
	row_mon_data->prev_obj_id = row->sync_id;
	row_mon_data->prev_state = row->state;
	row_mon_data->prev_remaining = row->remaining;
	/* prev_monitor_head is a plain copy; only monitor_head is atomic */
	row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head);
	/* Toggle swap flag. Avoid copying and just read/write using correct table idx */
	row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries;
}
  91. void cam_generic_fence_update_monitor_array(int idx,
  92. struct mutex *lock,
  93. struct cam_generic_fence_monitor_data **mon_data,
  94. enum cam_fence_op op)
  95. {
  96. int iterator, rc;
  97. struct cam_generic_fence_monitor_data *row_mon_data;
  98. struct cam_generic_fence_monitor_entry *row_mon_entries;
  99. /* Validate inputs */
  100. if (!lock || !mon_data)
  101. return;
  102. row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
  103. if (!row_mon_data) {
  104. rc = cam_generic_expand_monitor_table(idx, lock, mon_data);
  105. if (rc) {
  106. CAM_ERR(CAM_SYNC, "Failed to expand monitor table");
  107. return;
  108. }
  109. }
  110. row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx);
  111. if (op == CAM_FENCE_OP_CREATE)
  112. atomic64_set(&row_mon_data->monitor_head, -1);
  113. if (row_mon_data->swap_monitor_entries)
  114. row_mon_entries = row_mon_data->monitor_entries;
  115. else
  116. row_mon_entries = row_mon_data->prev_monitor_entries;
  117. CAM_SYNC_INC_MONITOR_HEAD(&row_mon_data->monitor_head, &iterator);
  118. CAM_GET_TIMESTAMP(row_mon_entries[iterator].timestamp);
  119. row_mon_entries[iterator].op = op;
  120. }
  121. static void __cam_generic_fence_dump_monitor_entries(
  122. struct cam_generic_fence_monitor_entry *monitor_entries,
  123. uint32_t index, uint32_t num_entries)
  124. {
  125. int i = 0;
  126. uint64_t ms, hrs, min, sec;
  127. for (i = 0; i < num_entries; i++) {
  128. CAM_CONVERT_TIMESTAMP_FORMAT(monitor_entries[index].timestamp,
  129. hrs, min, sec, ms);
  130. CAM_INFO(CAM_SYNC,
  131. "**** %llu:%llu:%llu.%llu : Index[%d] Op[%s]",
  132. hrs, min, sec, ms,
  133. index,
  134. cam_fence_op_to_string(monitor_entries[index].op));
  135. index = (index + 1) % CAM_SYNC_MONITOR_MAX_ENTRIES;
  136. }
  137. }
  138. static int __cam_generic_fence_get_monitor_entries_info(uint64_t state_head,
  139. uint32_t *oldest_entry, uint32_t *num_entries)
  140. {
  141. *oldest_entry = 0;
  142. *num_entries = 0;
  143. if (state_head == -1) {
  144. return -EINVAL;
  145. } else if (state_head < CAM_SYNC_MONITOR_MAX_ENTRIES) {
  146. /* head starts from -1 */
  147. *num_entries = state_head + 1;
  148. *oldest_entry = 0;
  149. } else {
  150. *num_entries = CAM_SYNC_MONITOR_MAX_ENTRIES;
  151. div_u64_rem(state_head + 1,
  152. CAM_SYNC_MONITOR_MAX_ENTRIES, oldest_entry);
  153. }
  154. return 0;
  155. }
/*
 * Dump the full monitor history for one fence object to the kernel log:
 * first the entries recorded by the current user of the slot, then (if
 * any) the entries archived from the slot's previous user before it was
 * recycled. @obj_info carries the object's identity fields plus the two
 * entry tables; the caller fills it from the per-fence monitor data.
 */
void cam_generic_fence_dump_monitor_array(
	struct cam_generic_fence_monitor_obj_info *obj_info)
{
	int rc;
	uint32_t num_entries, oldest_entry;
	uint64_t ms, hrs, min, sec;
	struct timespec64 current_ts;
	struct cam_generic_fence_monitor_data *mon_data = obj_info->monitor_data;

	/* Check if there are any current entries in the monitor data */
	rc = __cam_generic_fence_get_monitor_entries_info(
		atomic64_read(&mon_data->monitor_head),
		&oldest_entry, &num_entries);

	if (rc)
		return;

	/* Print current monitor entries */
	CAM_GET_TIMESTAMP(current_ts);
	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
	/* Header format differs per fence flavor (sync / dma / synx) */
	switch (obj_info->fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		CAM_INFO(CAM_SYNC,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->sync_type,
			obj_info->obj_id, obj_info->state, obj_info->remaining,
			obj_info->ref_cnt, num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		CAM_INFO(CAM_DMA_FENCE,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for dma obj %s, fd %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		CAM_INFO(CAM_SYNX,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for synx obj %s, synx_id %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
			num_entries);
		break;
	default:
		break;
	}
	__cam_generic_fence_dump_monitor_entries(obj_info->monitor_entries,
		oldest_entry, num_entries);

	/* Check if there are any previous entries in the monitor data */
	/* prev_monitor_head is a plain saved copy, hence no atomic64_read */
	rc = __cam_generic_fence_get_monitor_entries_info(
		mon_data->prev_monitor_head,
		&oldest_entry, &num_entries);

	if (rc)
		return;

	/* Print previous monitor entries */
	CAM_GET_TIMESTAMP(current_ts);
	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
	switch (obj_info->fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		CAM_INFO(CAM_SYNC,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_type,
			mon_data->prev_obj_id, mon_data->prev_state, mon_data->prev_remaining,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		CAM_INFO(CAM_DMA_FENCE,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for dma obj %s, fd %d sync_id %d state %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
			mon_data->prev_sync_id, mon_data->prev_state,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		CAM_INFO(CAM_SYNX,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for synx obj %s, synx_id %d sync_id %d state %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
			mon_data->prev_sync_id, mon_data->prev_state,
			num_entries);
		break;
	default:
		break;
	}
	__cam_generic_fence_dump_monitor_entries(obj_info->prev_monitor_entries,
		oldest_entry, num_entries);
}
/*
 * Dump the monitor history for one sync table row. Bails out early when
 * monitoring is disabled for sync objects, when monitor storage was never
 * allocated, or when the slot's prev_obj_id is zero.
 * NOTE(review): the prev_obj_id gate means nothing is dumped until the
 * slot has been recycled at least once — confirm that is intended.
 */
void cam_sync_dump_monitor_array(struct sync_table_row *row)
{
	struct cam_generic_fence_monitor_obj_info obj_info;

	if (!sync_dev->mon_data ||
		!test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask) ||
		!(CAM_GENERIC_MONITOR_GET_DATA(sync_dev->mon_data, row->sync_id)->prev_obj_id))
		return;

	/* Populate the generic dump descriptor from the sync row */
	obj_info.name = row->name;
	obj_info.sync_type = row->type;
	obj_info.obj_id = row->sync_id;
	obj_info.state = row->state;
	obj_info.remaining = row->remaining;
	obj_info.ref_cnt = atomic_read(&row->ref_cnt);
	obj_info.monitor_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);
	obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNC_OBJ;
	/* current/previous tables resolved through the swap flag */
	obj_info.monitor_entries =
		__cam_sync_get_monitor_entries(row->sync_id);
	obj_info.prev_monitor_entries =
		__cam_sync_get_prev_monitor_entries(row->sync_id);
	cam_generic_fence_dump_monitor_array(&obj_info);
}
  258. int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
  259. long *idx)
  260. {
  261. int rc = 0;
  262. mutex_lock(&sync_dev->table_lock);
  263. *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  264. if (*idx < CAM_SYNC_MAX_OBJS)
  265. set_bit(*idx, sync_dev->bitmap);
  266. else
  267. rc = -1;
  268. mutex_unlock(&sync_dev->table_lock);
  269. return rc;
  270. }
  271. int cam_sync_init_row(struct sync_table_row *table,
  272. uint32_t idx, const char *name, uint32_t type)
  273. {
  274. struct sync_table_row *row = table + idx;
  275. if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
  276. return -EINVAL;
  277. strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
  278. INIT_LIST_HEAD(&row->parents_list);
  279. INIT_LIST_HEAD(&row->children_list);
  280. row->type = type;
  281. row->sync_id = idx;
  282. row->state = CAM_SYNC_STATE_ACTIVE;
  283. row->remaining = 0;
  284. atomic_set(&row->ref_cnt, 0);
  285. init_completion(&row->signaled);
  286. INIT_LIST_HEAD(&row->callback_list);
  287. INIT_LIST_HEAD(&row->user_payload_list);
  288. if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
  289. cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
  290. sync_dev->mon_data,
  291. CAM_FENCE_OP_CREATE);
  292. }
  293. CAM_DBG(CAM_SYNC,
  294. "row name:%s sync_id:%i [idx:%u] row_state:%u ",
  295. row->name, row->sync_id, idx, row->state);
  296. return 0;
  297. }
  298. int cam_sync_init_group_object(struct sync_table_row *table,
  299. uint32_t idx,
  300. uint32_t *sync_objs,
  301. uint32_t num_objs)
  302. {
  303. int i, rc;
  304. struct sync_child_info *child_info;
  305. struct sync_parent_info *parent_info;
  306. struct sync_table_row *row = table + idx;
  307. struct sync_table_row *child_row = NULL;
  308. cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
  309. /*
  310. * While traversing for children, parent's row list is updated with
  311. * child info and each child's row is updated with parent info.
  312. * If any child state is ERROR or SUCCESS, it will not be added to list.
  313. */
  314. for (i = 0; i < num_objs; i++) {
  315. child_row = table + sync_objs[i];
  316. spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  317. /* validate child */
  318. if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
  319. (child_row->state == CAM_SYNC_STATE_INVALID)) {
  320. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  321. CAM_ERR(CAM_SYNC,
  322. "Invalid child fence:%i state:%u type:%u",
  323. child_row->sync_id, child_row->state,
  324. child_row->type);
  325. rc = -EINVAL;
  326. goto clean_children_info;
  327. }
  328. /* check for child's state */
  329. if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
  330. (child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
  331. row->state = child_row->state;
  332. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  333. continue;
  334. }
  335. if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
  336. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  337. continue;
  338. }
  339. row->remaining++;
  340. /* Add child info */
  341. child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
  342. if (!child_info) {
  343. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  344. rc = -ENOMEM;
  345. goto clean_children_info;
  346. }
  347. child_info->sync_id = sync_objs[i];
  348. list_add_tail(&child_info->list, &row->children_list);
  349. /* Add parent info */
  350. parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
  351. if (!parent_info) {
  352. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  353. rc = -ENOMEM;
  354. goto clean_children_info;
  355. }
  356. parent_info->sync_id = idx;
  357. list_add_tail(&parent_info->list, &child_row->parents_list);
  358. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  359. }
  360. if (!row->remaining) {
  361. if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
  362. (row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
  363. row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  364. complete_all(&row->signaled);
  365. }
  366. return 0;
  367. clean_children_info:
  368. row->state = CAM_SYNC_STATE_INVALID;
  369. for (i = i-1; i >= 0; i--) {
  370. spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  371. child_row = table + sync_objs[i];
  372. cam_sync_util_cleanup_parents_list(child_row,
  373. SYNC_LIST_CLEAN_ONE, idx);
  374. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  375. }
  376. cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
  377. return rc;
  378. }
/*
 * Tear down sync table row @idx: unlink it from every parent and child,
 * free pending callback/payload nodes, drop an imported dma-fence ref,
 * report matching dma/synx fences to the caller via the check_for_*
 * out-params, and finally zero and release the slot.
 *
 * Lock order: the row's spinlock is taken to detach the child/parent
 * lists onto temporaries, released, and then each linked row's spinlock
 * is taken individually — never two row spinlocks at once.
 *
 * Returns 0 on success, -EINVAL for bad args or an already-INVALID row.
 */
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
	struct cam_sync_check_for_dma_release *check_for_dma_release,
	struct cam_sync_check_for_synx_release *check_for_synx_release)
{
	struct sync_table_row *row = table + idx;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || (idx <= 0) || (idx >= CAM_SYNC_MAX_OBJS))
		return -EINVAL;

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	/* Double-destroy / never-created guard */
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx,
			row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	/* Record DESTROY, optionally dump, then archive this slot's history */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
		cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
			sync_dev->mon_data,
			CAM_FENCE_OP_DESTROY);
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP, &cam_sync_monitor_mask))
			cam_sync_dump_monitor_array(row);
		__cam_sync_save_previous_monitor_data(row);
	}

	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into this list */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	/* Detach links under this row's lock; process them lock-free below */
	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child to parent link from child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		/* Child already destroyed: just drop our bookkeeping node */
		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		/* Remove only our entry from the child's parents list */
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent to child link */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		/* Remove only our entry from the parent's children list */
		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	/* Re-acquire our row lock to drain callbacks/payloads and free it */
	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	/* Decrement ref cnt for imported dma fence */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		cam_dma_fence_get_put_ref(false, row->dma_fence_info.dma_fence_row_idx);

		/* Check if same dma fence is being released with the sync obj */
		if (check_for_dma_release) {
			if (row->dma_fence_info.dma_fence_fd ==
				check_for_dma_release->dma_fence_fd) {
				check_for_dma_release->sync_created_with_dma =
					row->dma_fence_info.sync_created_with_dma;
				check_for_dma_release->dma_fence_row_idx =
					row->dma_fence_info.dma_fence_row_idx;
			}
		}
	}

	/* Check if same synx obj is being released with the sync obj */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
		if (check_for_synx_release) {
			if (row->synx_obj_info.synx_obj ==
				check_for_synx_release->synx_obj) {
				check_for_synx_release->synx_obj_row_idx =
					row->synx_obj_info.synx_obj_row_idx;
				check_for_synx_release->sync_created_with_synx =
					row->synx_obj_info.sync_created_with_synx;
			}
		}
	}

	/* Wipe the row and release the bitmap slot; lists re-inited so the
	 * zeroed row is safe to traverse before its next init.
	 */
	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  529. void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
  530. {
  531. struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
  532. struct sync_callback_info,
  533. cb_dispatch_work);
  534. sync_callback sync_data = cb_info->callback_func;
  535. cam_common_util_thread_switch_delay_detect(
  536. "CAM-SYNC workq schedule",
  537. cb_info->workq_scheduled_ts,
  538. CAM_WORKQ_SCHEDULE_TIME_THRESHOLD);
  539. sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);
  540. kfree(cb_info);
  541. }
/*
 * Deliver a signal for @sync_obj to all waiters: queue every registered
 * kernel callback onto the workqueue, emit a v4l2 event per registered
 * user payload, and complete the row so blocking waiters wake up.
 * NOTE(review): no row spinlock is taken here — presumably the caller
 * holds sync_dev->row_spinlocks[sync_obj]; confirm against call sites.
 */
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info  *sync_cb;
	struct sync_user_payload   *payload_info;
	struct sync_callback_info  *temp_sync_cb;
	struct sync_table_row      *signalable_row;
	struct sync_user_payload   *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]", signalable_row->name,
			sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		/* Monitor: callback auto-unregisters once signaled */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		/* Abort if the device's event queue has been torn down */
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
		/*
		 * We can free the list node here because
		 * sending V4L event will make a deep copy
		 * anyway
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object
	 */
	complete_all(&signalable_row->signaled);
}
  606. void cam_sync_util_send_v4l2_event(uint32_t id,
  607. uint32_t sync_obj,
  608. int status,
  609. void *payload,
  610. int len, uint32_t event_cause)
  611. {
  612. struct v4l2_event event;
  613. __u64 *payload_data = NULL;
  614. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  615. struct cam_sync_ev_header_v2 *ev_header = NULL;
  616. event.id = id;
  617. event.type = CAM_SYNC_V4L_EVENT_V2;
  618. ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
  619. ev_header->sync_obj = sync_obj;
  620. ev_header->status = status;
  621. ev_header->version = sync_dev->version;
  622. ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
  623. event_cause;
  624. payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
  625. } else {
  626. struct cam_sync_ev_header *ev_header = NULL;
  627. event.id = id;
  628. event.type = CAM_SYNC_V4L_EVENT;
  629. ev_header = CAM_SYNC_GET_HEADER_PTR(event);
  630. ev_header->sync_obj = sync_obj;
  631. ev_header->status = status;
  632. payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
  633. }
  634. memcpy(payload_data, payload, len);
  635. v4l2_event_queue(sync_dev->vdev, &event);
  636. CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
  637. sync_dev->version,
  638. sync_obj);
  639. }
  640. int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
  641. int new_state)
  642. {
  643. int rc = 0;
  644. switch (parent_row->state) {
  645. case CAM_SYNC_STATE_ACTIVE:
  646. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  647. parent_row->state = new_state;
  648. break;
  649. case CAM_SYNC_STATE_SIGNALED_ERROR:
  650. case CAM_SYNC_STATE_SIGNALED_CANCEL:
  651. break;
  652. case CAM_SYNC_STATE_INVALID:
  653. default:
  654. rc = -EINVAL;
  655. break;
  656. }
  657. return rc;
  658. }
  659. void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
  660. uint32_t list_clean_type, uint32_t sync_obj)
  661. {
  662. struct sync_child_info *child_info = NULL;
  663. struct sync_child_info *temp_child_info = NULL;
  664. uint32_t curr_sync_obj;
  665. list_for_each_entry_safe(child_info,
  666. temp_child_info, &row->children_list, list) {
  667. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  668. (child_info->sync_id != sync_obj))
  669. continue;
  670. curr_sync_obj = child_info->sync_id;
  671. list_del_init(&child_info->list);
  672. kfree(child_info);
  673. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  674. (curr_sync_obj == sync_obj))
  675. break;
  676. }
  677. }
  678. void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
  679. uint32_t list_clean_type, uint32_t sync_obj)
  680. {
  681. struct sync_parent_info *parent_info = NULL;
  682. struct sync_parent_info *temp_parent_info = NULL;
  683. uint32_t curr_sync_obj;
  684. list_for_each_entry_safe(parent_info,
  685. temp_parent_info, &row->parents_list, list) {
  686. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  687. (parent_info->sync_id != sync_obj))
  688. continue;
  689. curr_sync_obj = parent_info->sync_id;
  690. list_del_init(&parent_info->list);
  691. kfree(parent_info);
  692. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  693. (curr_sync_obj == sync_obj))
  694. break;
  695. }
  696. }