cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"

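/*
 * Find the first unused slot in the sync table bitmap and claim it.
 * On success the claimed index is written to *idx and 0 is returned;
 * -1 is returned when all CAM_SYNC_MAX_OBJS slots are in use.
 * table_lock serializes concurrent allocations.
 *
 * A minimal sketch of the create path (the real caller lives in
 * cam_sync.c; names below are illustrative):
 *
 *	long idx = 0;
 *
 *	if (cam_sync_util_find_and_set_empty_row(sync_dev, &idx) < 0)
 *		return -ENOMEM;
 *	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
 *		CAM_SYNC_TYPE_INDV);
 */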
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}

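/*
 * Initialize the sync table row at @idx: zero it, record its name and
 * type, mark it ACTIVE, and reset its child/parent, callback and user
 * payload lists along with the signaled completion.
 */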
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	row = table + idx;
	memset(row, 0, sizeof(*row));
	strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}

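/*
 * Build a group (merged) sync object at @idx from @num_objs child sync
 * objects. Each still-active child is cross-linked with the group row;
 * children that have already signaled only influence the group state.
 * On any failure, all links created so far are rolled back.
 */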
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);

	/*
	 * While traversing the children, the parent's row list is updated
	 * with child info and each child's row is updated with parent info.
	 * Children that have already signaled (SUCCESS, ERROR or CANCEL)
	 * are not added to either list.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}

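/*
 * Tear down the sync object at @idx: unlink it from every child and
 * parent, free any pending callback and user payload entries, clear
 * the row and release its slot in the bitmap. Child and parent links
 * are first moved onto temporary lists so that per-row spinlocks can
 * be taken one at a time, never nested across rows.
 */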
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
	struct sync_table_row *row;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	row = table + idx;
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d name = %s",
			idx, row->name);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child-to-parent links from the child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %s[%d]",
				child_row->name,
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent-to-child links */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %s[%d]",
				parent_row->name,
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}

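/*
 * Workqueue handler that runs one registered kernel callback. The
 * delay between queueing and execution is checked first, then the
 * callback is invoked and its info node freed.
 */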
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);
	sync_callback sync_data = cb_info->callback_func;

	cam_req_mgr_thread_switch_delay_detect(
		cb_info->workq_scheduled_ts);

	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);

	kfree(cb_info);
}

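/*
 * Deliver a signal for @sync_obj: queue every registered kernel
 * callback onto the sync workqueue, send one V4L2 event per registered
 * user payload, and wake any waiters blocked on the row's completion.
 */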
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%s[%i]",
			signalable_row->name, sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because
		 * sending the V4L event makes a deep copy
		 * anyway.
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object.
	 */
	complete_all(&signalable_row->signaled);
}

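/*
 * Package a sync event into a v4l2_event and queue it to userspace.
 * The header layout depends on the negotiated event version; V2 also
 * carries the event cause in its parameter array.
 */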
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);
	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version, sync_obj);
}

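/*
 * Propagate a child's new state to @parent_row. ACTIVE and
 * SIGNALED_SUCCESS parents take the new state; ERROR and CANCEL are
 * sticky and never overwritten; an INVALID parent is an error.
 */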
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

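/*
 * Remove entries from a row's children list. With SYNC_LIST_CLEAN_ONE
 * only the entry matching @sync_obj is freed; otherwise the whole list
 * is drained.
 */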
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
		temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}

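/*
 * Remove entries from a row's parents list, mirroring
 * cam_sync_util_cleanup_children_list() for the parent direction.
 */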
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
		temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}