// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
#include <synx_api.h>
#endif
#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"
#include "camera_main.h"

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
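
/*
 * Dump every row of the global sync table; used as a debug aid when
 * object allocation fails because the table is exhausted.
 */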
static void cam_sync_print_fence_table(void)
{
	int idx;

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
		spin_lock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_INFO(CAM_SYNC,
			"index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
			idx,
			sync_dev->sync_table[idx].sync_id,
			sync_dev->sync_table[idx].name,
			sync_dev->sync_table[idx].type,
			sync_dev->sync_table[idx].state,
			atomic_read(&sync_dev->sync_table[idx].ref_cnt));
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	}
}
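
/*
 * Allocate a free row in the sync table and initialize it as an
 * individual (non-group) sync object. The bitmap find/set loop retries
 * until test_and_set_bit() wins the race for a free slot.
 */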
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %ld reached max!",
				idx);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return rc;
}
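
/*
 * Attach a callback to a sync object. If the object has already been
 * signaled, the callback is dispatched right away (inline or via the
 * workqueue, depending on trigger_cb_without_switch); otherwise it is
 * queued on the row's callback_list and fires when the object signals.
 */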
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
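
/*
 * Remove a previously registered callback. Both the function pointer
 * and the userdata cookie must match for an entry to be removed.
 */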
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
		sync_obj);
	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if (sync_cb->callback_func == cb_func &&
			sync_cb->cb_data == userdata) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return found ? 0 : -ENOENT;
}
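
/*
 * Signal an individual sync object, dispatch its registered callbacks,
 * then walk its parent (merged) objects: each parent's remaining count
 * is decremented and the parent is dispatched once it reaches zero.
 * The parent list is spliced onto a local list first so the child's
 * row lock is not held while parent locks are taken.
 */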
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d event reason = %u",
			status, event_cause);
		return -EINVAL;
	}

	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
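
/*
 * Merge a list of sync objects into one group object. The merged
 * object signals only after every constituent object has signaled.
 * Duplicate fences in the input list are rejected up front.
 */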
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
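
/*
 * Take an extra reference on an ACTIVE sync object. cam_sync_signal()
 * drops one reference per call and only signals when the count hits
 * zero, so every extra get must be balanced by a signal or a put.
 */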
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}
	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}
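
/* Drop a reference; note that the row state is not checked here. */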
int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
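
/*
 * Validate that a sync handle refers to a live object: its bitmap bit
 * must still be set and its row must not be in the INVALID state.
 */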
int cam_sync_check_valid(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	if (!test_bit(sync_obj, sync_dev->bitmap)) {
		CAM_ERR(CAM_SYNC, "Error: Released sync obj received %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	return 0;
}
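
/*
 * Block until the object signals or the timeout expires. Only a
 * SIGNALED_SUCCESS final state maps to 0; error/cancel states and a
 * still-ACTIVE row map to -EINVAL, and expiry maps to -ETIMEDOUT.
 */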
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));
	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
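
/*
 * Ioctl handlers below: each one validates the embedded payload size
 * and pointer, copies the argument in from user space, performs the
 * operation, and copies results back out where applicable.
 */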
static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	result = cam_sync_create(&sync_create.sync_obj,
		sync_create.name);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_create,
			k_ioctl->size))
			return -EFAULT;

	return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state,
		CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_merge sync_merge;
	uint32_t *sync_objs;
	uint32_t num_objs;
	uint32_t size;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_merge))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_merge,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	size = sizeof(uint32_t) * sync_merge.num_objs;
	sync_objs = kzalloc(size, GFP_ATOMIC);
	if (!sync_objs)
		return -ENOMEM;

	if (copy_from_user(sync_objs,
		u64_to_user_ptr(sync_merge.sync_objs),
		sizeof(uint32_t) * sync_merge.num_objs)) {
		kfree(sync_objs);
		return -EFAULT;
	}

	num_objs = sync_merge.num_objs;

	result = cam_sync_merge(sync_objs,
		num_objs,
		&sync_merge.merged);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_merge,
			k_ioctl->size)) {
			kfree(sync_objs);
			return -EFAULT;
		}

	kfree(sync_objs);
	return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_wait sync_wait;

	if (k_ioctl->size != sizeof(struct cam_sync_wait))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_wait,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
		sync_wait.timeout_ms);

	return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	return cam_sync_destroy(sync_create.sync_obj);
}
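
/*
 * Register a user payload against a sync object. If the object has
 * already signaled, the payload is delivered immediately via a v4l2
 * event; otherwise it is queued (duplicates rejected by comparing the
 * first two payload words) and delivered on signal.
 */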
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}

static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
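
/*
 * Single entry point for all private ioctls; dispatches on k_ioctl.id.
 * The argument is copied to a local so handlers work on kernel memory,
 * and only the WAIT result is propagated back through the caller's arg.
 */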
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}

static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
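
/*
 * Only a single userspace client may hold the sync device open at a
 * time; a second open fails with -EALREADY.
 */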
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
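
/*
 * On last close, fail all still-ACTIVE objects with SIGNALED_ERROR,
 * flush the callback workqueue, then destroy every remaining object so
 * no table state leaks across client sessions.
 */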
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callback worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}

static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
	struct v4l2_event *new)
{
	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d reason %u %u %u %u",
			old->id, ev_header->sync_obj, ev_header->status,
			ev_header->evt_param[0], ev_header->evt_param[1],
			ev_header->evt_param[2], ev_header->evt_param[3]);
	} else {
		struct cam_sync_ev_header *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d",
			old->id, ev_header->sync_obj, ev_header->status);
	}
}

static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};

int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Unsupported event type 0x%x", sub->type);
		return -EINVAL;
	}

	sync_dev->version = sub->type;
	CAM_DBG(CAM_SYNC, "Sync event version type 0x%x", sync_dev->version);
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Unsupported event type 0x%x", sub->type);
		return -EINVAL;
	}

	return v4l2_event_unsubscribe(fh, sub);
}

static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

#if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
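
/*
 * Expose trigger_cb_without_switch under debugfs. IS_ERR_OR_NULL() is
 * used for the directory check because debugfs_create_dir() returns an
 * error pointer on failure on recent kernels (NULL only on older ones).
 */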
static int cam_sync_create_debugfs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_sync", NULL);
	if (IS_ERR_OR_NULL(dbgfileptr)) {
		CAM_ERR(CAM_SYNC, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}
	/* Store parent inode for cleanup in caller */
	sync_dev->dentry = dbgfileptr;

	dbgfileptr = debugfs_create_bool("trigger_cb_without_switch", 0644,
		sync_dev->dentry, &trigger_cb_without_switch);
	if (IS_ERR(dbgfileptr)) {
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_SYNC, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
	}
end:
	return rc;
}
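
/*
 * Optional bridge to the global synx fence framework: translates synx
 * states to cam_sync states and registers the cam_sync entry points as
 * synx bind ops.
 */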
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
{
	int rc = 0;
	uint32_t sync_status = synx_status;

	switch (synx_status) {
	case SYNX_STATE_ACTIVE:
		sync_status = CAM_SYNC_STATE_ACTIVE;
		break;
	case SYNX_STATE_SIGNALED_SUCCESS:
		sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		break;
	case SYNX_STATE_SIGNALED_ERROR:
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
		sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
		break;
	default:
		CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
			synx_status, sync_obj);
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	}

	rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
			rc, sync_obj, synx_status, sync_status);
	}

	return rc;
}

static int cam_sync_register_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc = 0;

	rc = synx_register_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);

	return rc;
}

static void cam_sync_unregister_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc = 0;

	rc = synx_deregister_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
}

static void cam_sync_configure_synx_obj(struct synx_register_params *object)
{
	struct synx_register_params *params = object;

	params->name = CAM_SYNC_NAME;
	params->type = SYNX_TYPE_CSL;
	params->ops.register_callback = cam_sync_register_callback;
	params->ops.deregister_callback = cam_sync_deregister_callback;
	params->ops.enable_signaling = cam_sync_get_obj_ref;
	params->ops.signal = cam_synx_sync_signal;
}
#endif
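
/*
 * Component bind: allocates the sync device, registers the v4l2 video
 * device, initializes the fence table/bitmap (bit 0 stays set because
 * handle 0 is reserved as invalid), and creates the callback workqueue.
 */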
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;

	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	if (rc)
		goto v4l2_fail;
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}

static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	debugfs_remove_recursive(sync_dev->dentry);
	sync_dev->dentry = NULL;

	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	kfree(sync_dev);
	sync_dev = NULL;
}

static const struct component_ops cam_sync_component_ops = {
	.bind = cam_sync_component_bind,
	.unbind = cam_sync_component_unbind,
};

static int cam_sync_probe(struct platform_device *pdev)
{
	int rc = 0;

	CAM_DBG(CAM_SYNC, "Adding Sync component");
	rc = component_add(&pdev->dev, &cam_sync_component_ops);
	if (rc)
		CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);

	return rc;
}

static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}

static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};

MODULE_DEVICE_TABLE(of, cam_sync_dt_match);

struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};

int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");