cam_sync_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved.
 */

#include "cam_sync_util.h"
#include "cam_req_mgr_workq.h"
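
/*
 * Find the first free slot in the sync table bitmap and mark it as used.
 * On success the slot index is written to *idx and 0 is returned; -1 is
 * returned when the table is full. Allocation is serialized through
 * table_lock.
 */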
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
	long *idx)
{
	int rc = 0;

	mutex_lock(&sync_dev->table_lock);

	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	if (*idx < CAM_SYNC_MAX_OBJS)
		set_bit(*idx, sync_dev->bitmap);
	else
		rc = -1;

	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
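
/*
 * Initialize the table row at @idx as a fresh ACTIVE sync object of
 * @type with the given debug @name, empty parent/child/callback/payload
 * lists, a zeroed ref count, and an unsignaled completion. Returns
 * -EINVAL for a NULL table or an out-of-range index.
 */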
int cam_sync_init_row(struct sync_table_row *table,
	uint32_t idx, const char *name, uint32_t type)
{
	struct sync_table_row *row = table + idx;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	memset(row, 0, sizeof(*row));

	if (name)
		strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	row->type = type;
	row->sync_id = idx;
	row->state = CAM_SYNC_STATE_ACTIVE;
	row->remaining = 0;
	atomic_set(&row->ref_cnt, 0);
	init_completion(&row->signaled);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	return 0;
}
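
/*
 * Build a group (merged) sync object at @idx from the @num_objs fences in
 * @sync_objs. Each still-ACTIVE child is cross-linked with the group row
 * and counted in @remaining; if every child has already signaled, the
 * group is signaled immediately. On an invalid child or an allocation
 * failure, the links created so far are unwound and an error is returned.
 */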
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);

	/*
	 * While traversing the children, the parent's row list is updated
	 * with child info and each child's row is updated with parent info.
	 * If a child is already in an ERROR or SUCCESS state, it is not
	 * added to either list.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
			(child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
			row->state = child_row->state;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	if (!row->remaining) {
		if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) &&
			(row->state != CAM_SYNC_STATE_SIGNALED_CANCEL))
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i - 1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}
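
/*
 * Tear down the sync object at @idx: mark the row INVALID, detach it from
 * every child and parent row, free pending kernel callbacks and user
 * payloads, then clear the row and release its bitmap slot. Child and
 * parent links are first moved to temporary lists under the row's lock
 * and then unlinked one row at a time, so only one row spinlock is ever
 * held at once.
 */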
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
	struct sync_table_row *row = table + idx;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d",
			idx);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into these lists */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child-to-parent links from the child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %d",
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent-to-child links */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %d",
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
		&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
		&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
	return 0;
}
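
/*
 * Work handler for a queued sync callback. The
 * cam_req_mgr_thread_switch_delay_detect() call presumably flags an
 * excessive gap between queueing and execution (judging by its name and
 * the recorded workq_scheduled_ts); the handler then invokes the
 * registered callback with the stored status and frees the callback node.
 */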
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);
	sync_callback sync_data = cb_info->callback_func;

	cam_req_mgr_thread_switch_delay_detect(
		cb_info->workq_scheduled_ts);
	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);

	kfree(cb_info);
}
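
/*
 * Fan out a signal on @sync_obj: queue every registered kernel callback
 * on the sync workqueue with @status, send one V4L2 event per registered
 * user payload (unless the event queue has been torn down), and complete
 * the row's completion to release any synchronous waiters.
 */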
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status, uint32_t event_cause)
{
	struct sync_callback_info *sync_cb;
	struct sync_user_payload *payload_info;
	struct sync_callback_info *temp_sync_cb;
	struct sync_table_row *signalable_row;
	struct sync_user_payload *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%i", sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64),
			event_cause);

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because sending the V4L
		 * event makes a deep copy of the payload anyway.
		 */
		kfree(payload_info);
	}

	/*
	 * Complete the signaled completion to unblock anyone who might be
	 * blocked waiting on this sync object.
	 */
	complete_all(&signalable_row->signaled);
}
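
/*
 * Fill in and queue a V4L2 event for @sync_obj, using the v2 header
 * layout (which also carries @event_cause) when sync_dev->version is
 * CAM_SYNC_V4L_EVENT_V2, and the legacy header otherwise. @payload is
 * copied into the event, so the caller keeps ownership of its buffer.
 */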
void cam_sync_util_send_v4l2_event(uint32_t id,
	uint32_t sync_obj,
	int status,
	void *payload,
	int len, uint32_t event_cause)
{
	struct v4l2_event event;
	__u64 *payload_data = NULL;

	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT_V2;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		ev_header->version = sync_dev->version;
		ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] =
			event_cause;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64);
	} else {
		struct cam_sync_ev_header *ev_header = NULL;

		event.id = id;
		event.type = CAM_SYNC_V4L_EVENT;

		ev_header = CAM_SYNC_GET_HEADER_PTR(event);
		ev_header->sync_obj = sync_obj;
		ev_header->status = status;
		payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
	}

	memcpy(payload_data, payload, len);
	v4l2_event_queue(sync_dev->vdev, &event);
	CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d",
		sync_dev->version,
		sync_obj);
}
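
/*
 * Propagate a child's signaled state to @parent_row: parents in ACTIVE or
 * SIGNALED_SUCCESS take @new_state, ERROR/CANCEL results are sticky, and
 * an INVALID parent returns -EINVAL. The function takes no lock itself,
 * so the caller is assumed to hold the parent row's spinlock.
 */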
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
	int new_state)
{
	int rc = 0;

	switch (parent_row->state) {
	case CAM_SYNC_STATE_ACTIVE:
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		parent_row->state = new_state;
		break;

	case CAM_SYNC_STATE_SIGNALED_ERROR:
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		break;

	case CAM_SYNC_STATE_INVALID:
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
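
/*
 * Remove entries from @row's children list, freeing each removed node:
 * all entries for SYNC_LIST_CLEAN_ALL, or only the entry matching
 * @sync_obj for SYNC_LIST_CLEAN_ONE.
 */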
void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_child_info *child_info = NULL;
	struct sync_child_info *temp_child_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(child_info,
		temp_child_info, &row->children_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(child_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = child_info->sync_id;
		list_del_init(&child_info->list);
		kfree(child_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}
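
/*
 * Counterpart of the children cleanup for @row's parents list: remove and
 * free all entries, or only the one matching @sync_obj when called with
 * SYNC_LIST_CLEAN_ONE.
 */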
void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
	uint32_t list_clean_type, uint32_t sync_obj)
{
	struct sync_parent_info *parent_info = NULL;
	struct sync_parent_info *temp_parent_info = NULL;
	uint32_t curr_sync_obj;

	list_for_each_entry_safe(parent_info,
		temp_parent_info, &row->parents_list, list) {
		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(parent_info->sync_id != sync_obj))
			continue;

		curr_sync_obj = parent_info->sync_id;
		list_del_init(&parent_info->list);
		kfree(parent_info);

		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
			(curr_sync_obj == sync_obj))
			break;
	}
}