cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"
#include "cam_common_util.h"

extern unsigned long cam_sync_monitor_mask;
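
/*
 * Lazily allocate one row of the fence monitor table. The table is a
 * two-level structure: an array of row pointers, where each row holds
 * CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ per-fence monitor slots. A row is
 * allocated the first time a fence index landing in it is used, keeping
 * memory cost proportional to the number of fences actually monitored.
 * lock may be NULL, in which case the caller is assumed to already
 * serialize access to the table.
 */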
static int cam_generic_expand_monitor_table(int idx, struct mutex *lock,
	struct cam_generic_fence_monitor_data **mon_data)
{
	struct cam_generic_fence_monitor_data *row_mon_data;

	if (lock)
		mutex_lock(lock);

	row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
	if (!row_mon_data) {
		row_mon_data = kzalloc(
			sizeof(struct cam_generic_fence_monitor_data) *
			CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, GFP_KERNEL);
		mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)] = row_mon_data;
	}

	if (!row_mon_data) {
		CAM_ERR(CAM_SYNC, "Error allocating memory %zu, idx %d",
			sizeof(struct cam_generic_fence_monitor_data) *
			CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, idx);
		if (lock)
			mutex_unlock(lock);
		return -ENOMEM;
	}

	if (lock)
		mutex_unlock(lock);

	return 0;
}
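
/*
 * Monitor entries are double buffered: monitor_entries and
 * prev_monitor_entries swap roles whenever a slot's history is saved
 * off (see __cam_sync_save_previous_monitor_data()). The
 * swap_monitor_entries flag selects which buffer is "current"; these
 * helpers resolve the current and previous buffers so callers never
 * index the wrong one.
 */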
static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_monitor_entries(int idx)
{
	struct cam_generic_fence_monitor_data *mon_data;

	mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
	if (mon_data->swap_monitor_entries)
		return mon_data->prev_monitor_entries;
	else
		return mon_data->monitor_entries;
}

static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_prev_monitor_entries(int idx)
{
	struct cam_generic_fence_monitor_data *mon_data;

	mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
	if (mon_data->swap_monitor_entries)
		return mon_data->monitor_entries;
	else
		return mon_data->prev_monitor_entries;
}

const char *cam_fence_op_to_string(
	enum cam_fence_op op)
{
	switch (op) {
	case CAM_FENCE_OP_CREATE:
		return "CREATE";
	case CAM_FENCE_OP_REGISTER_CB:
		return "REGISTER_CB";
	case CAM_FENCE_OP_SIGNAL:
		return "SIGNAL";
	case CAM_FENCE_OP_UNREGISTER_ON_SIGNAL:
		return "UNREGISTER_ON_SIGNAL";
	case CAM_FENCE_OP_UNREGISTER_CB:
		return "UNREGISTER_CB";
	case CAM_FENCE_OP_SKIP_REGISTER_CB:
		return "SKIP_REGISTER_CB";
	case CAM_FENCE_OP_ALREADY_REGISTERED_CB:
		return "ALREADY_REGISTERED_CB";
	case CAM_FENCE_OP_DESTROY:
		return "DESTROY";
	default:
		return "INVALID";
	}
}
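
/*
 * Snapshot the current usage details of a sync row into the prev_*
 * fields of its monitor data, then flip swap_monitor_entries. After the
 * flip, new operations are logged into the other entry buffer, so the
 * history of the object being destroyed survives (without copying a
 * whole entry table) until the slot is recycled a second time.
 */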
static void __cam_sync_save_previous_monitor_data(
	struct sync_table_row *row)
{
	struct cam_generic_fence_monitor_data *row_mon_data;

	if (!sync_dev->mon_data)
		return;

	row_mon_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);

	/* Save current usage details into the prev variables */
	strscpy(row_mon_data->prev_name, row->name, SYNC_DEBUG_NAME_LEN);
	row_mon_data->prev_type = row->type;
	row_mon_data->prev_obj_id = row->sync_id;
	row_mon_data->prev_state = row->state;
	row_mon_data->prev_remaining = row->remaining;
	row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head);
	/* Toggle the swap flag; avoid copying and just read/write using the correct table idx */
	row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries;
}
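
/*
 * Append one operation to a fence's monitor log. monitor_head is a
 * monotonically increasing counter (reset to -1 on CREATE);
 * CAM_SYNC_INC_MONITOR_HEAD advances it and yields the ring-buffer slot
 * (head modulo CAM_SYNC_MONITOR_MAX_ENTRIES), so the log always retains
 * the most recent operations. The entry is written into whichever
 * buffer is currently active per swap_monitor_entries, matching the
 * selection made by __cam_sync_get_monitor_entries().
 */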
void cam_generic_fence_update_monitor_array(int idx,
	struct mutex *lock,
	struct cam_generic_fence_monitor_data **mon_data,
	enum cam_fence_op op)
{
	int iterator, rc;
	struct cam_generic_fence_monitor_data *row_mon_data;
	struct cam_generic_fence_monitor_entry *row_mon_entries;

	/* Validate inputs */
	if (!mon_data)
		return;

	row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
	if (!row_mon_data) {
		rc = cam_generic_expand_monitor_table(idx, lock, mon_data);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Failed to expand monitor table");
			return;
		}
	}

	row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx);
	if (op == CAM_FENCE_OP_CREATE)
		atomic64_set(&row_mon_data->monitor_head, -1);

	/* Log into the currently active buffer (same selection as the getters) */
	if (row_mon_data->swap_monitor_entries)
		row_mon_entries = row_mon_data->prev_monitor_entries;
	else
		row_mon_entries = row_mon_data->monitor_entries;

	CAM_SYNC_INC_MONITOR_HEAD(&row_mon_data->monitor_head, &iterator);
	CAM_GET_TIMESTAMP(row_mon_entries[iterator].timestamp);
	row_mon_entries[iterator].op = op;
}

static void __cam_generic_fence_dump_monitor_entries(
	struct cam_generic_fence_monitor_entry *monitor_entries,
	uint32_t index, uint32_t num_entries)
{
	int i = 0;
	uint64_t ms, hrs, min, sec;

	for (i = 0; i < num_entries; i++) {
		CAM_CONVERT_TIMESTAMP_FORMAT(monitor_entries[index].timestamp,
			hrs, min, sec, ms);

		CAM_INFO(CAM_SYNC,
			"**** %llu:%llu:%llu.%llu : Index[%d] Op[%s]",
			hrs, min, sec, ms,
			index,
			cam_fence_op_to_string(monitor_entries[index].op));

		index = (index + 1) % CAM_SYNC_MONITOR_MAX_ENTRIES;
	}
}
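
/*
 * Translate a ring-buffer head counter into (oldest index, entry count)
 * for a chronological dump. Heads start at -1, so a head of h with
 * h < CAM_SYNC_MONITOR_MAX_ENTRIES means h + 1 entries starting at slot
 * 0; once the buffer has wrapped, all entries are valid and the oldest
 * sits at (h + 1) % CAM_SYNC_MONITOR_MAX_ENTRIES. For example, with a
 * hypothetical max of 16 entries and h = 20, the dump walks 16 entries
 * beginning at slot 5.
 */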
static int __cam_generic_fence_get_monitor_entries_info(uint64_t state_head,
	uint32_t *oldest_entry, uint32_t *num_entries)
{
	*oldest_entry = 0;
	*num_entries = 0;

	if (state_head == -1) {
		return -EINVAL;
	} else if (state_head < CAM_SYNC_MONITOR_MAX_ENTRIES) {
		/* head starts from -1 */
		*num_entries = state_head + 1;
		*oldest_entry = 0;
	} else {
		*num_entries = CAM_SYNC_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_SYNC_MONITOR_MAX_ENTRIES, oldest_entry);
	}

	return 0;
}
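
/*
 * Dump the full monitor history for one fence object: first the current
 * entries, then (if the slot was recycled) the entries saved for the
 * slot's previous user. The banner format depends on the fence flavor
 * (sync obj, dma fence, or synx obj).
 */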
void cam_generic_fence_dump_monitor_array(
	struct cam_generic_fence_monitor_obj_info *obj_info)
{
	int rc;
	uint32_t num_entries, oldest_entry;
	uint64_t ms, hrs, min, sec;
	struct timespec64 current_ts;
	struct cam_generic_fence_monitor_data *mon_data = obj_info->monitor_data;

	/* Check if there are any current entries in the monitor data */
	rc = __cam_generic_fence_get_monitor_entries_info(
		atomic64_read(&mon_data->monitor_head),
		&oldest_entry, &num_entries);
	if (rc)
		return;

	/* Print current monitor entries */
	CAM_GET_TIMESTAMP(current_ts);
	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
	switch (obj_info->fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		CAM_INFO(CAM_SYNC,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->sync_type,
			obj_info->obj_id, obj_info->state, obj_info->remaining,
			obj_info->ref_cnt, num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		CAM_INFO(CAM_DMA_FENCE,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for dma obj %s, fd %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		CAM_INFO(CAM_SYNX,
			"======== %llu:%llu:%llu:%llu Dumping monitor information for synx obj %s, synx_id %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
			num_entries);
		break;
	default:
		break;
	}

	__cam_generic_fence_dump_monitor_entries(obj_info->monitor_entries,
		oldest_entry, num_entries);

	/* Check if there are any previous entries in the monitor data */
	rc = __cam_generic_fence_get_monitor_entries_info(
		mon_data->prev_monitor_head,
		&oldest_entry, &num_entries);
	if (rc)
		return;

	/* Print previous monitor entries */
	CAM_GET_TIMESTAMP(current_ts);
	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
	switch (obj_info->fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		CAM_INFO(CAM_SYNC,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_type,
			mon_data->prev_obj_id, mon_data->prev_state, mon_data->prev_remaining,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		CAM_INFO(CAM_DMA_FENCE,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for dma obj %s, fd %d sync_id %d state %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
			mon_data->prev_sync_id, mon_data->prev_state,
			num_entries);
		break;
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		CAM_INFO(CAM_SYNX,
			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for synx obj %s, synx_id %d sync_id %d state %d num_entries %u ===========",
			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
			mon_data->prev_sync_id, mon_data->prev_state,
			num_entries);
		break;
	default:
		break;
	}

	__cam_generic_fence_dump_monitor_entries(obj_info->prev_monitor_entries,
		oldest_entry, num_entries);
}
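
/*
 * Sync-obj flavor of the dump: returns early unless sync-obj monitoring
 * is enabled, the monitor table exists, and this slot has recorded
 * history (prev_obj_id only becomes nonzero once the slot has been
 * through a create/destroy cycle). When those hold, it packages the row
 * into a generic obj_info and hands off to
 * cam_generic_fence_dump_monitor_array().
 */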
void cam_sync_dump_monitor_array(struct sync_table_row *row)
{
	struct cam_generic_fence_monitor_obj_info obj_info;

	if (!sync_dev->mon_data ||
		!test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask) ||
		!(CAM_GENERIC_MONITOR_GET_DATA(sync_dev->mon_data, row->sync_id)->prev_obj_id))
		return;

	obj_info.name = row->name;
	obj_info.sync_type = row->type;
	obj_info.obj_id = row->sync_id;
	obj_info.state = row->state;
	obj_info.remaining = row->remaining;
	obj_info.ref_cnt = atomic_read(&row->ref_cnt);
	obj_info.monitor_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);
	obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNC_OBJ;
	obj_info.monitor_entries =
		__cam_sync_get_monitor_entries(row->sync_id);
	obj_info.prev_monitor_entries =
		__cam_sync_get_prev_monitor_entries(row->sync_id);
	cam_generic_fence_dump_monitor_array(&obj_info);
}
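
/*
 * Claim a free slot in the sync table. The bitmap scan and set_bit must
 * be atomic with respect to other allocators, hence the table_lock;
 * returns -1 when all CAM_SYNC_MAX_OBJS slots are in use.
 */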
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
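
/*
 * Initialize a freshly claimed sync table row: name it, mark it ACTIVE,
 * reset its counters and lists, and log a CREATE op when sync-obj
 * monitoring is enabled. Index 0 is treated as invalid and rejected
 * along with out-of-range indices.
 */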
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	row = table + idx;
	strscpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
		cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
			sync_dev->mon_data,
			CAM_FENCE_OP_CREATE);
	}

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}
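
/*
 * Build a merged ("group") fence over num_objs child sync objects. For
 * each ACTIVE child, the group gains a child link and the child gains a
 * parent link, and row->remaining counts the children that still have
 * to signal. Children already signaled with ERROR/CANCEL propagate
 * their state to the group; if nothing remains pending the group is
 * completed immediately. On allocation failure, all links added so far
 * are rolled back.
 */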
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);

	/*
	 * While traversing for children, the parent's row list is updated with
	 * child info and each child's row is updated with parent info.
	 * If any child state is ERROR or SUCCESS, it will not be added to the list.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}
	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}
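
/*
 * Tear down one sync object. The row is marked INVALID under its
 * spinlock, its child/parent links are moved onto temporary lists, and
 * the cross-links are then severed while holding each relative's lock
 * (never two row locks at once, avoiding lock-order inversions).
 * Pending callbacks and user payloads are freed, the ref on an imported
 * dma fence is dropped, and the caller is told whether an associated
 * dma/synx fence is being released together with this sync obj.
 */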
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
	struct cam_sync_check_for_dma_release *check_for_dma_release,
	struct cam_sync_check_for_synx_release *check_for_synx_release)
{
	struct sync_table_row *row;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || (idx <= 0) || (idx >= CAM_SYNC_MAX_OBJS))
		return -EINVAL;

	row = table + idx;
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx,
			row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
		cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
			sync_dev->mon_data,
			CAM_FENCE_OP_DESTROY);
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP, &cam_sync_monitor_mask))
			cam_sync_dump_monitor_array(row);
		__cam_sync_save_previous_monitor_data(row);
	}

	row->state = CAM_SYNC_STATE_INVALID;

	/* The object's child and parent objects will be added into these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child-to-parent links from the child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent-to-child links */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	/* Decrement the ref cnt for an imported dma fence */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		cam_dma_fence_get_put_ref(false, row->dma_fence_info.dma_fence_row_idx);

		/* Check if the same dma fence is being released with the sync obj */
		if (check_for_dma_release) {
			if (row->dma_fence_info.dma_fence_fd ==
				check_for_dma_release->dma_fence_fd) {
				check_for_dma_release->sync_created_with_dma =
					row->dma_fence_info.sync_created_with_dma;
				check_for_dma_release->dma_fence_row_idx =
					row->dma_fence_info.dma_fence_row_idx;
			}
		}
	}

	/* Check if the same synx obj is being released with the sync obj */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
		if (check_for_synx_release) {
			if (row->synx_obj_info.synx_obj ==
				check_for_synx_release->synx_obj) {
				check_for_synx_release->synx_obj_row_idx =
					row->synx_obj_info.synx_obj_row_idx;
				check_for_synx_release->sync_created_with_synx =
					row->synx_obj_info.sync_created_with_synx;
			}
		}
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
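
/*
 * Workqueue handler for a registered kernel callback: measure how long
 * the work item sat on the queue before running, invoke the callback
 * with the signaled object's status, and free the callback node.
 */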
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);
	sync_callback sync_data = cb_info->callback_func;
	void *cb = cb_info->callback_func;

	cam_common_util_thread_switch_delay_detect(
		"cam_sync_workq", "schedule", cb,
		cb_info->workq_scheduled_ts,
		CAM_WORKQ_SCHEDULE_TIME_THRESHOLD);

	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);

	kfree(cb_info);
}
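
/*
 * Fan out a signal to everyone waiting on sync_obj: queue each
 * registered kernel callback onto the sync workqueue, send one v4l2
 * event per registered user payload, log the unregister ops when
 * monitoring is enabled, and finally complete the row so threads
 * blocked in a synchronous wait are released.
 */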
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]", signalable_row->name,
			sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
		/*
		 * We can free the list node here because
		 * sending the V4L event will make a deep copy
		 * anyway
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object
	 */
	complete_all(&signalable_row->signaled);
}
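
/*
 * Deliver a fence event to userspace over the v4l2 event queue. The
 * header layout depends on the negotiated event version: v2 carries the
 * protocol version plus an event_cause reason code, v1 only the object
 * id and status. The payload is copied into the event, so the caller's
 * buffer can be freed as soon as this returns.
 */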
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);
	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version,
		sync_obj);
}
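
/*
 * Propagate a child's terminal state into a parent (group) fence. An
 * earlier SUCCESS may be overwritten by a later ERROR/CANCEL, but once
 * a parent is in ERROR or CANCEL that state is sticky; transitioning an
 * INVALID parent is an error.
 */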
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
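
/*
 * List cleanup helpers. With SYNC_LIST_CLEAN_ONE only the entry whose
 * sync_id matches sync_obj is unlinked and freed (the loop stops after
 * the first match); with SYNC_LIST_CLEAN_ALL every entry goes. Callers
 * are expected to hold the corresponding row spinlock.
 */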
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
		temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}

void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
		temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}