cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

#include "cam_sync_util.h"
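
/**
 * cam_sync_util_find_and_set_empty_row() - Claim a free row in the table
 * @sync_dev: Sync device whose bitmap tracks row usage
 * @idx:      Output; index of the claimed row on success
 *
 * Scans the bitmap for the first unused slot under table_lock and marks it
 * in use. Returns 0 on success, or -1 when the table is full.
 */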
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
        long *idx)
{
        int rc = 0;

        mutex_lock(&sync_dev->table_lock);

        *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

        if (*idx < CAM_SYNC_MAX_OBJS)
                set_bit(*idx, sync_dev->bitmap);
        else
                rc = -1;

        mutex_unlock(&sync_dev->table_lock);

        return rc;
}
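
/**
 * cam_sync_init_row() - Initialize a table row as a fresh sync object
 * @table: Base of the sync table
 * @idx:   Row index to initialize
 * @name:  Optional debug name copied into the row
 * @type:  CAM_SYNC_TYPE_INDV or CAM_SYNC_TYPE_GROUP
 *
 * Zeroes the row, resets its lists and completion, and marks it ACTIVE.
 * Returns 0 on success or -EINVAL on bad arguments.
 *
 * A minimal creation sketch (an illustration only; the actual create path
 * in the cam_sync core also holds the row spinlock around initialization):
 *
 *	long idx;
 *
 *	if (!cam_sync_util_find_and_set_empty_row(sync_dev, &idx))
 *		cam_sync_init_row(sync_dev->sync_table, (uint32_t)idx,
 *			"my_fence", CAM_SYNC_TYPE_INDV);
 */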
int cam_sync_init_row(struct sync_table_row *table,
        uint32_t idx, const char *name, uint32_t type)
{
        struct sync_table_row *row;

        if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
                return -EINVAL;

        /* Compute the row pointer only after the arguments are validated */
        row = table + idx;
        memset(row, 0, sizeof(*row));

        if (name)
                strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
        INIT_LIST_HEAD(&row->parents_list);
        INIT_LIST_HEAD(&row->children_list);
        row->type = type;
        row->sync_id = idx;
        row->state = CAM_SYNC_STATE_ACTIVE;
        row->remaining = 0;
        atomic_set(&row->ref_cnt, 0);
        init_completion(&row->signaled);
        INIT_LIST_HEAD(&row->callback_list);
        INIT_LIST_HEAD(&row->user_payload_list);
        CAM_DBG(CAM_SYNC,
                "row name:%s sync_id:%i [idx:%u] row_state:%u",
                row->name, row->sync_id, idx, row->state);

        return 0;
}
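
/**
 * cam_sync_init_group_object() - Initialize a merged (group) sync object
 * @table:     Base of the sync table
 * @idx:       Row index of the new group object
 * @sync_objs: Array of child sync object IDs to merge
 * @num_objs:  Number of entries in @sync_objs
 *
 * Links each ACTIVE child to the group row and vice versa, taking each
 * child's row spinlock while it is touched. A child already signaled with
 * an error propagates that error to the group; a child that is itself a
 * group, or is INVALID, fails the merge with -EINVAL. If no child remains
 * pending, the group is signaled immediately.
 */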
int cam_sync_init_group_object(struct sync_table_row *table,
        uint32_t idx,
        uint32_t *sync_objs,
        uint32_t num_objs)
{
        int i, rc = 0;
        struct sync_child_info *child_info;
        struct sync_parent_info *parent_info;
        struct sync_table_row *row = table + idx;
        struct sync_table_row *child_row = NULL;

        rc = cam_sync_init_row(table, idx, "merged_fence",
                CAM_SYNC_TYPE_GROUP);
        if (rc)
                return rc;

        /*
         * While traversing for children, the parent's row list is updated
         * with child info and each child's row is updated with parent info.
         * If any child state is ERROR or SUCCESS, it will not be added to
         * the list.
         */
        for (i = 0; i < num_objs; i++) {
                child_row = table + sync_objs[i];
                spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

                /* validate child */
                if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
                        (child_row->state == CAM_SYNC_STATE_INVALID)) {
                        spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                        CAM_ERR(CAM_SYNC,
                                "Invalid child fence:%i state:%u type:%u",
                                child_row->sync_id, child_row->state,
                                child_row->type);
                        rc = -EINVAL;
                        goto clean_children_info;
                }

                /* check for child's state */
                if (child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
                        row->state = CAM_SYNC_STATE_SIGNALED_ERROR;
                        spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                        continue;
                }
                if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
                        spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                        continue;
                }

                row->remaining++;

                /* Add child info */
                child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
                if (!child_info) {
                        spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                        rc = -ENOMEM;
                        goto clean_children_info;
                }
                child_info->sync_id = sync_objs[i];
                list_add_tail(&child_info->list, &row->children_list);

                /* Add parent info */
                parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
                if (!parent_info) {
                        spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                        rc = -ENOMEM;
                        goto clean_children_info;
                }
                parent_info->sync_id = idx;
                list_add_tail(&parent_info->list, &child_row->parents_list);
                spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
        }

        if (!row->remaining) {
                if (row->state != CAM_SYNC_STATE_SIGNALED_ERROR)
                        row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
                complete_all(&row->signaled);
        }

        return 0;

clean_children_info:
        row->state = CAM_SYNC_STATE_INVALID;
        for (i = i - 1; i >= 0; i--) {
                spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
                child_row = table + sync_objs[i];
                cam_sync_util_cleanup_parents_list(child_row,
                        SYNC_LIST_CLEAN_ONE, idx);
                spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
        }

        cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
        return rc;
}
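
/**
 * cam_sync_deinit_object() - Tear down a sync object and its relationships
 * @table: Base of the sync table
 * @idx:   Row index of the object to destroy
 *
 * Marks the row INVALID, detaches it from all parents and children (the
 * link nodes are first moved to temporary lists so two row spinlocks are
 * never held at once), frees any pending callbacks and user payloads, and
 * finally clears the row and its bitmap slot for reuse.
 */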
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
        struct sync_table_row *row;
        struct sync_child_info *child_info, *temp_child;
        struct sync_callback_info *sync_cb, *temp_cb;
        struct sync_parent_info *parent_info, *temp_parent;
        struct sync_user_payload *upayload_info, *temp_upayload;
        struct sync_table_row *child_row = NULL, *parent_row = NULL;
        struct list_head temp_child_list, temp_parent_list;

        if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
                return -EINVAL;

        /* Compute the row pointer only after the arguments are validated */
        row = table + idx;

        CAM_DBG(CAM_SYNC,
                "row name:%s sync_id:%i [idx:%u] row_state:%u",
                row->name, row->sync_id, idx, row->state);

        spin_lock_bh(&sync_dev->row_spinlocks[idx]);
        if (row->state == CAM_SYNC_STATE_INVALID) {
                spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
                CAM_ERR(CAM_SYNC,
                        "Error: accessing an uninitialized sync obj: idx = %d",
                        idx);
                return -EINVAL;
        }

        if (row->state == CAM_SYNC_STATE_ACTIVE)
                CAM_DBG(CAM_SYNC,
                        "Destroying an active sync object name:%s id:%i",
                        row->name, row->sync_id);

        row->state = CAM_SYNC_STATE_INVALID;

        /* The object's child and parent links are moved onto these lists */
        INIT_LIST_HEAD(&temp_child_list);
        INIT_LIST_HEAD(&temp_parent_list);

        list_for_each_entry_safe(child_info, temp_child, &row->children_list,
                list) {
                if (child_info->sync_id <= 0)
                        continue;

                list_del_init(&child_info->list);
                list_add_tail(&child_info->list, &temp_child_list);
        }

        list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
                list) {
                if (parent_info->sync_id <= 0)
                        continue;

                list_del_init(&parent_info->list);
                list_add_tail(&parent_info->list, &temp_parent_list);
        }

        spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

        /* Clean up the child-to-parent links from the child list */
        while (!list_empty(&temp_child_list)) {
                child_info = list_first_entry(&temp_child_list,
                        struct sync_child_info, list);
                child_row = sync_dev->sync_table + child_info->sync_id;

                spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

                if (child_row->state == CAM_SYNC_STATE_INVALID) {
                        list_del_init(&child_info->list);
                        spin_unlock_bh(&sync_dev->row_spinlocks[
                                child_info->sync_id]);
                        kfree(child_info);
                        continue;
                }

                if (child_row->state == CAM_SYNC_STATE_ACTIVE)
                        CAM_DBG(CAM_SYNC,
                                "Warning: destroying active child sync obj = %d",
                                child_info->sync_id);

                cam_sync_util_cleanup_parents_list(child_row,
                        SYNC_LIST_CLEAN_ONE, idx);

                list_del_init(&child_info->list);
                spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
                kfree(child_info);
        }

        /* Clean up the parent-to-child links */
        while (!list_empty(&temp_parent_list)) {
                parent_info = list_first_entry(&temp_parent_list,
                        struct sync_parent_info, list);
                parent_row = sync_dev->sync_table + parent_info->sync_id;

                spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

                if (parent_row->state == CAM_SYNC_STATE_INVALID) {
                        list_del_init(&parent_info->list);
                        spin_unlock_bh(&sync_dev->row_spinlocks[
                                parent_info->sync_id]);
                        kfree(parent_info);
                        continue;
                }

                if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
                        CAM_DBG(CAM_SYNC,
                                "Warning: destroying active parent sync obj = %d",
                                parent_info->sync_id);

                cam_sync_util_cleanup_children_list(parent_row,
                        SYNC_LIST_CLEAN_ONE, idx);

                list_del_init(&parent_info->list);
                spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
                kfree(parent_info);
        }

        spin_lock_bh(&sync_dev->row_spinlocks[idx]);
        list_for_each_entry_safe(upayload_info, temp_upayload,
                &row->user_payload_list, list) {
                list_del_init(&upayload_info->list);
                kfree(upayload_info);
        }

        list_for_each_entry_safe(sync_cb, temp_cb,
                &row->callback_list, list) {
                list_del_init(&sync_cb->list);
                kfree(sync_cb);
        }

        memset(row, 0, sizeof(*row));
        clear_bit(idx, sync_dev->bitmap);
        INIT_LIST_HEAD(&row->callback_list);
        INIT_LIST_HEAD(&row->parents_list);
        INIT_LIST_HEAD(&row->children_list);
        INIT_LIST_HEAD(&row->user_payload_list);
        spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

        CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
        return 0;
}
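
/**
 * cam_sync_util_cb_dispatch() - Work-queue handler for a kernel callback
 * @cb_dispatch_work: Work struct embedded in a sync_callback_info
 *
 * Invokes the registered callback with the object's final status, then
 * frees the callback node.
 */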
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
        struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
                struct sync_callback_info,
                cb_dispatch_work);

        cb_info->callback_func(cb_info->sync_obj,
                cb_info->status,
                cb_info->cb_data);

        kfree(cb_info);
}
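
/**
 * cam_sync_util_dispatch_signaled_cb() - Fan a signal out to all waiters
 * @sync_obj: ID of the signaled sync object
 * @status:   Final status to report
 *
 * Queues every registered kernel callback on the sync work queue, sends a
 * V4L2 event for every registered user payload, and completes the row so
 * blocking waiters wake up. The row's lists are walked without locking
 * here, so the caller is assumed to serialize access to the row.
 */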
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
        uint32_t status)
{
        struct sync_callback_info *sync_cb;
        struct sync_user_payload *payload_info;
        struct sync_callback_info *temp_sync_cb;
        struct sync_table_row *signalable_row;
        struct sync_user_payload *temp_payload_info;

        signalable_row = sync_dev->sync_table + sync_obj;
        if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
                CAM_DBG(CAM_SYNC,
                        "Accessing invalid sync object:%i", sync_obj);
                return;
        }

        /* Dispatch kernel callbacks if any were registered earlier */
        list_for_each_entry_safe(sync_cb,
                temp_sync_cb, &signalable_row->callback_list, list) {
                sync_cb->status = status;
                list_del_init(&sync_cb->list);
                queue_work(sync_dev->work_queue,
                        &sync_cb->cb_dispatch_work);
        }

        /* Dispatch user payloads if any were registered earlier */
        list_for_each_entry_safe(payload_info, temp_payload_info,
                &signalable_row->user_payload_list, list) {
                spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
                if (!sync_dev->cam_sync_eventq) {
                        spin_unlock_bh(
                                &sync_dev->cam_sync_eventq_lock);
                        break;
                }
                spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
                cam_sync_util_send_v4l2_event(
                        CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
                        sync_obj,
                        status,
                        payload_info->payload_data,
                        CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

                list_del_init(&payload_info->list);
                /*
                 * We can free the list node here because sending the V4L
                 * event makes a deep copy anyway.
                 */
                kfree(payload_info);
        }

        /*
         * This needs to be done because we want to unblock anyone who might
         * be blocked and waiting on this sync object.
         */
        complete_all(&signalable_row->signaled);
}
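
/**
 * cam_sync_util_send_v4l2_event() - Send a sync event to user space
 * @id:       V4L2 event ID, e.g. CAM_SYNC_V4L_EVENT_ID_CB_TRIG
 * @sync_obj: Sync object the event refers to
 * @status:   Status to report in the event header
 * @payload:  Payload bytes copied into the event
 * @len:      Payload length in bytes
 *
 * Fills a v4l2_event with the sync header and payload and queues it on the
 * sync video device; a subscribed user-space client can then dequeue it
 * (typically via VIDIOC_DQEVENT).
 */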
void cam_sync_util_send_v4l2_event(uint32_t id,
        uint32_t sync_obj,
        int status,
        void *payload,
        int len)
{
        struct v4l2_event event;
        __u64 *payload_data = NULL;
        struct cam_sync_ev_header *ev_header = NULL;

        event.id = id;
        event.type = CAM_SYNC_V4L_EVENT;

        ev_header = CAM_SYNC_GET_HEADER_PTR(event);
        ev_header->sync_obj = sync_obj;
        ev_header->status = status;

        payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
        memcpy(payload_data, payload, len);

        v4l2_event_queue(sync_dev->vdev, &event);
        CAM_DBG(CAM_SYNC, "send v4l2 event for sync_obj:%d",
                sync_obj);
}
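
/**
 * cam_sync_util_update_parent_state() - Fold a child result into a parent
 * @parent_row: Row of the parent (group) object
 * @new_state:  State reported by the signaling child
 *
 * An ACTIVE or SIGNALED_SUCCESS parent takes on the child's state, while a
 * SIGNALED_ERROR parent is sticky and stays in error. Returns -EINVAL for
 * an INVALID or unrecognized parent state, 0 otherwise.
 */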
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
        int new_state)
{
        int rc = 0;

        switch (parent_row->state) {
        case CAM_SYNC_STATE_ACTIVE:
        case CAM_SYNC_STATE_SIGNALED_SUCCESS:
                parent_row->state = new_state;
                break;

        case CAM_SYNC_STATE_SIGNALED_ERROR:
                break;

        case CAM_SYNC_STATE_INVALID:
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}
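
/**
 * cam_sync_util_cleanup_children_list() - Drop child links from a row
 * @row:             Row whose children_list is cleaned
 * @list_clean_type: SYNC_LIST_CLEAN_ONE to drop only the entry matching
 *                   @sync_obj, SYNC_LIST_CLEAN_ALL to drop every entry
 * @sync_obj:        Child ID to drop when cleaning a single entry
 */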
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
        uint32_t list_clean_type, uint32_t sync_obj)
{
        struct sync_child_info *child_info = NULL;
        struct sync_child_info *temp_child_info = NULL;
        uint32_t curr_sync_obj;

        list_for_each_entry_safe(child_info,
                temp_child_info, &row->children_list, list) {
                if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
                        (child_info->sync_id != sync_obj))
                        continue;

                curr_sync_obj = child_info->sync_id;
                list_del_init(&child_info->list);
                kfree(child_info);

                if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
                        (curr_sync_obj == sync_obj))
                        break;
        }
}
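
/**
 * cam_sync_util_cleanup_parents_list() - Drop parent links from a row
 * @row:             Row whose parents_list is cleaned
 * @list_clean_type: SYNC_LIST_CLEAN_ONE to drop only the entry matching
 *                   @sync_obj, SYNC_LIST_CLEAN_ALL to drop every entry
 * @sync_obj:        Parent ID to drop when cleaning a single entry
 */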
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
        uint32_t list_clean_type, uint32_t sync_obj)
{
        struct sync_parent_info *parent_info = NULL;
        struct sync_parent_info *temp_parent_info = NULL;
        uint32_t curr_sync_obj;

        list_for_each_entry_safe(parent_info,
                temp_parent_info, &row->parents_list, list) {
                if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
                        (parent_info->sync_id != sync_obj))
                        continue;

                curr_sync_obj = parent_info->sync_id;
                list_del_init(&parent_info->list);
                kfree(parent_info);

                if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
                        (curr_sync_obj == sync_obj))
                        break;
        }
}