cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved.
 */

#include "cam_sync_util.h"
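
/**
 * cam_sync_util_find_and_set_empty_row() - Find the first unused slot in the
 * sync table bitmap and mark it as in use.
 * @sync_dev: Sync device whose bitmap is searched
 * @idx:      Filled with the index of the claimed slot on success
 *
 * Serialized against concurrent allocations by table_lock. Returns 0 on
 * success, or -1 if all CAM_SYNC_MAX_OBJS slots are taken.
 */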
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
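
/**
 * cam_sync_init_row() - Initialize a sync table row as a fresh, active sync
 * object.
 * @table: Base of the sync table
 * @idx:   Row index to initialize; must be in (0, CAM_SYNC_MAX_OBJS)
 * @name:  Optional debug name copied into the row
 * @type:  Sync object type, e.g. CAM_SYNC_TYPE_GROUP for merged fences
 *
 * Returns 0 on success or -EINVAL on invalid arguments.
 */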
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	/* Compute the row pointer only after validating table and idx */
	row = table + idx;
	memset(row, 0, sizeof(*row));

	if (name)
		strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}
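
/**
 * cam_sync_init_group_object() - Initialize row @idx as a group (merged)
 * sync object whose children are @sync_objs.
 * @table:     Base of the sync table
 * @idx:       Row index for the new group object
 * @sync_objs: Array of child sync object IDs to merge
 * @num_objs:  Number of entries in @sync_objs
 *
 * Links each active child to the group and counts it in row->remaining.
 * If no child is still active, the group is signaled immediately. Returns
 * 0 on success or a negative error code, unwinding any partially created
 * links on failure.
 */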
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
	/*
	 * While traversing the children, the parent's row list is updated
	 * with child info and each child's row is updated with parent info.
	 * Children that are already signaled are not linked; an error or
	 * cancel child state is propagated to the group object instead.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}
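
/**
 * cam_sync_deinit_object() - Tear down the sync object at row @idx.
 * @table: Base of the sync table
 * @idx:   Row index to destroy; must be in (0, CAM_SYNC_MAX_OBJS)
 *
 * Marks the row invalid, severs the parent/child links in both directions,
 * frees any pending callback and user payload entries, then clears the row
 * and releases its bitmap slot. Returns 0 on success or -EINVAL.
 */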
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
	struct sync_table_row *row;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	/* Compute the row pointer only after validating table and idx */
	row = table + idx;
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d",
			idx);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child to parent link from child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %d",
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent to child link */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %d",
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
	return 0;
}
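
/**
 * cam_sync_util_cb_dispatch() - Work handler that invokes one registered
 * kernel callback and frees its bookkeeping node.
 * @cb_dispatch_work: Embedded work_struct of the sync_callback_info to run
 */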
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);

	cb_info->callback_func(cb_info->sync_obj,
		cb_info->status,
		cb_info->cb_data);

	kfree(cb_info);
}
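
/**
 * cam_sync_util_dispatch_signaled_cb() - Deliver a signal on @sync_obj to
 * all registered consumers.
 * @sync_obj:    Sync object that has been signaled
 * @status:      Signaled state (success/error/cancel) to report
 * @event_cause: Reason code forwarded to userspace in the V4L2 event
 *
 * Queues every registered kernel callback onto the sync workqueue, sends a
 * V4L2 event for every registered user payload, and completes the row so
 * that synchronous waiters are unblocked.
 */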
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%i", sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because
		 * sending the V4L event makes a deep copy
		 * anyway
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object
	 */
	complete_all(&signalable_row->signaled);
}
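
/**
 * cam_sync_util_send_v4l2_event() - Queue a V4L2 event carrying a sync
 * object's status and payload to userspace.
 * @id:          V4L2 event id, e.g. CAM_SYNC_V4L_EVENT_ID_CB_TRIG
 * @sync_obj:    Sync object the event refers to
 * @status:      Signaled state to report
 * @payload:     Payload bytes copied into the event
 * @len:         Number of payload bytes to copy
 * @event_cause: Reason code (carried in the v2 header only)
 *
 * Uses the v2 event header when sync_dev->version is CAM_SYNC_V4L_EVENT_V2,
 * otherwise falls back to the v1 header.
 */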
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);
	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version,
		sync_obj);
}
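
/**
 * cam_sync_util_update_parent_state() - Propagate a child's signaled state
 * to @parent_row.
 * @parent_row: Parent (group) row to update
 * @new_state:  Child's signaled state
 *
 * Active and success parents take on @new_state; error and cancel states
 * are sticky and left unchanged. Returns -EINVAL for invalid parents.
 */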
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
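
/**
 * cam_sync_util_cleanup_children_list() - Remove entries from @row's
 * children list.
 * @row:             Row whose children list is cleaned
 * @list_clean_type: SYNC_LIST_CLEAN_ONE to drop only @sync_obj,
 *                   SYNC_LIST_CLEAN_ALL to drop every entry
 * @sync_obj:        Child sync ID to drop when cleaning a single entry
 */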
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
			temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}
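
/**
 * cam_sync_util_cleanup_parents_list() - Remove entries from @row's
 * parents list.
 * @row:             Row whose parents list is cleaned
 * @list_clean_type: SYNC_LIST_CLEAN_ONE to drop only @sync_obj,
 *                   SYNC_LIST_CLEAN_ALL to drop every entry
 * @sync_obj:        Parent sync ID to drop when cleaning a single entry
 */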
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
			temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}