// cam_sync_util.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
  4. */
  5. #include "cam_sync_util.h"
  6. #include "cam_req_mgr_workq.h"
  7. #include "cam_common_util.h"
  8. int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
  9. long *idx)
  10. {
  11. int rc = 0;
  12. mutex_lock(&sync_dev->table_lock);
  13. *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  14. if (*idx < CAM_SYNC_MAX_OBJS)
  15. set_bit(*idx, sync_dev->bitmap);
  16. else
  17. rc = -1;
  18. mutex_unlock(&sync_dev->table_lock);
  19. return rc;
  20. }
  21. int cam_sync_init_row(struct sync_table_row *table,
  22. uint32_t idx, const char *name, uint32_t type)
  23. {
  24. struct sync_table_row *row = table + idx;
  25. if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
  26. return -EINVAL;
  27. memset(row, 0, sizeof(*row));
  28. strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
  29. INIT_LIST_HEAD(&row->parents_list);
  30. INIT_LIST_HEAD(&row->children_list);
  31. row->type = type;
  32. row->sync_id = idx;
  33. row->state = CAM_SYNC_STATE_ACTIVE;
  34. row->remaining = 0;
  35. atomic_set(&row->ref_cnt, 0);
  36. init_completion(&row->signaled);
  37. INIT_LIST_HEAD(&row->callback_list);
  38. INIT_LIST_HEAD(&row->user_payload_list);
  39. CAM_DBG(CAM_SYNC,
  40. "row name:%s sync_id:%i [idx:%u] row_state:%u ",
  41. row->name, row->sync_id, idx, row->state);
  42. return 0;
  43. }
  44. int cam_sync_init_group_object(struct sync_table_row *table,
  45. uint32_t idx,
  46. uint32_t *sync_objs,
  47. uint32_t num_objs)
  48. {
  49. int i, rc = 0;
  50. struct sync_child_info *child_info;
  51. struct sync_parent_info *parent_info;
  52. struct sync_table_row *row = table + idx;
  53. struct sync_table_row *child_row = NULL;
  54. cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
  55. /*
  56. * While traversing for children, parent's row list is updated with
  57. * child info and each child's row is updated with parent info.
  58. * If any child state is ERROR or SUCCESS, it will not be added to list.
  59. */
  60. for (i = 0; i < num_objs; i++) {
  61. child_row = table + sync_objs[i];
  62. spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  63. /* validate child */
  64. if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
  65. (child_row->state == CAM_SYNC_STATE_INVALID)) {
  66. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  67. CAM_ERR(CAM_SYNC,
  68. "Invalid child fence:%i state:%u type:%u",
  69. child_row->sync_id, child_row->state,
  70. child_row->type);
  71. rc = -EINVAL;
  72. goto clean_children_info;
  73. }
  74. /* check for child's state */
  75. if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
  76. (child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
  77. row->state = child_row->state;
  78. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  79. continue;
  80. }
  81. if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
  82. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  83. continue;
  84. }
  85. row->remaining++;
  86. /* Add child info */
  87. child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
  88. if (!child_info) {
  89. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  90. rc = -ENOMEM;
  91. goto clean_children_info;
  92. }
  93. child_info->sync_id = sync_objs[i];
  94. list_add_tail(&child_info->list, &row->children_list);
  95. /* Add parent info */
  96. parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
  97. if (!parent_info) {
  98. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  99. rc = -ENOMEM;
  100. goto clean_children_info;
  101. }
  102. parent_info->sync_id = idx;
  103. list_add_tail(&parent_info->list, &child_row->parents_list);
  104. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  105. }
  106. if (!row->remaining) {
  107. if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
  108. (row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
  109. row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  110. complete_all(&row->signaled);
  111. }
  112. return 0;
  113. clean_children_info:
  114. row->state = CAM_SYNC_STATE_INVALID;
  115. for (i = i-1; i >= 0; i--) {
  116. spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  117. child_row = table + sync_objs[i];
  118. cam_sync_util_cleanup_parents_list(child_row,
  119. SYNC_LIST_CLEAN_ONE, idx);
  120. spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
  121. }
  122. cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
  123. return rc;
  124. }
/**
 * cam_sync_deinit_object() - Tear down a sync object and detach it from
 * all parents and children.
 *
 * @table: Base of the sync object table
 * @idx:   Row index to destroy; must lie in (0, CAM_SYNC_MAX_OBJS)
 *
 * Under the row's spinlock the object is marked INVALID and its child/
 * parent link nodes are moved to temporary local lists; the lock is then
 * dropped so each linked row can be locked individually (avoids holding
 * two row locks at once). Finally the row's callback and user payload
 * lists are freed and the row/bitmap slot is recycled.
 *
 * Return: 0 on success, -EINVAL for a bad table/index or an object that
 * is already INVALID.
 */
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
	struct sync_table_row *row = table + idx;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	/* Double-destroy / never-initialized object: reject */
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx,
			row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	/* Mark INVALID first so concurrent users see the object as gone */
	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into this list */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	/*
	 * Move link nodes (sync_id > 0 only) onto local lists so they can
	 * be processed after this row's lock is released.
	 */
	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child to parent link from child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		/* Child was destroyed concurrently; just drop the link node */
		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		/* Remove this object (idx) from the child's parents list */
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent to child link */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		/* Parent was destroyed concurrently; just drop the link node */
		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		/* Remove this object (idx) from the parent's children list */
		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	/* Free any registered kernel callbacks and user payloads */
	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	/* Recycle the row and release its bitmap slot */
	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  239. void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
  240. {
  241. struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
  242. struct sync_callback_info,
  243. cb_dispatch_work);
  244. sync_callback sync_data = cb_info->callback_func;
  245. cam_common_util_thread_switch_delay_detect(
  246. "CAM-SYNC workq schedule",
  247. cb_info->workq_scheduled_ts,
  248. CAM_WORKQ_SCHEDULE_TIME_THRESHOLD);
  249. sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);
  250. kfree(cb_info);
  251. }
/**
 * cam_sync_util_dispatch_signaled_cb() - Deliver all notifications for a
 * signaled sync object.
 *
 * @sync_obj:    Id of the signaled sync object
 * @status:      Final CAM_SYNC_STATE_SIGNALED_* status to report
 * @event_cause: Reason code forwarded to userspace (V2 events)
 *
 * Queues every registered kernel callback onto the sync workqueue, sends
 * a v4l2 event for every registered user payload, and finally completes
 * the row's completion to release in-kernel waiters.
 *
 * NOTE(review): no locking is taken on the row here — presumably the
 * caller holds sync_dev->row_spinlocks[sync_obj]; verify at call sites.
 */
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]", signalable_row->name,
			sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		/* Actual invocation happens in cam_sync_util_cb_dispatch() */
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		/* If the eventq is gone (device closed), stop sending */
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because
		 * sending V4L event will make a deep copy
		 * anyway
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object
	 */
	complete_all(&signalable_row->signaled);
}
  306. void cam_sync_util_send_v4l2_event(uint32_t id,
  307. uint32_t sync_obj,
  308. int status,
  309. void *payload,
  310. int len, uint32_t event_cause)
  311. {
  312. struct v4l2_event event;
  313. __u64 *payload_data = NULL;
  314. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  315. struct cam_sync_ev_header_v2 *ev_header = NULL;
  316. event.id = id;
  317. event.type = CAM_SYNC_V4L_EVENT_V2;
  318. ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
  319. ev_header->sync_obj = sync_obj;
  320. ev_header->status = status;
  321. ev_header->version = sync_dev->version;
  322. ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
  323. event_cause;
  324. payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
  325. } else {
  326. struct cam_sync_ev_header *ev_header = NULL;
  327. event.id = id;
  328. event.type = CAM_SYNC_V4L_EVENT;
  329. ev_header = CAM_SYNC_GET_HEADER_PTR(event);
  330. ev_header->sync_obj = sync_obj;
  331. ev_header->status = status;
  332. payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
  333. }
  334. memcpy(payload_data, payload, len);
  335. v4l2_event_queue(sync_dev->vdev, &event);
  336. CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
  337. sync_dev->version,
  338. sync_obj);
  339. }
  340. int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
  341. int new_state)
  342. {
  343. int rc = 0;
  344. switch (parent_row->state) {
  345. case CAM_SYNC_STATE_ACTIVE:
  346. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  347. parent_row->state = new_state;
  348. break;
  349. case CAM_SYNC_STATE_SIGNALED_ERROR:
  350. case CAM_SYNC_STATE_SIGNALED_CANCEL:
  351. break;
  352. case CAM_SYNC_STATE_INVALID:
  353. default:
  354. rc = -EINVAL;
  355. break;
  356. }
  357. return rc;
  358. }
  359. void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
  360. uint32_t list_clean_type, uint32_t sync_obj)
  361. {
  362. struct sync_child_info *child_info = NULL;
  363. struct sync_child_info *temp_child_info = NULL;
  364. uint32_t curr_sync_obj;
  365. list_for_each_entry_safe(child_info,
  366. temp_child_info, &row->children_list, list) {
  367. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  368. (child_info->sync_id != sync_obj))
  369. continue;
  370. curr_sync_obj = child_info->sync_id;
  371. list_del_init(&child_info->list);
  372. kfree(child_info);
  373. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  374. (curr_sync_obj == sync_obj))
  375. break;
  376. }
  377. }
  378. void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
  379. uint32_t list_clean_type, uint32_t sync_obj)
  380. {
  381. struct sync_parent_info *parent_info = NULL;
  382. struct sync_parent_info *temp_parent_info = NULL;
  383. uint32_t curr_sync_obj;
  384. list_for_each_entry_safe(parent_info,
  385. temp_parent_info, &row->parents_list, list) {
  386. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  387. (parent_info->sync_id != sync_obj))
  388. continue;
  389. curr_sync_obj = parent_info->sync_id;
  390. list_del_init(&parent_info->list);
  391. kfree(parent_info);
  392. if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
  393. (curr_sync_obj == sync_obj))
  394. break;
  395. }
  396. }