cam_sync.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "cam_compat.h"
  18. #include "camera_main.h"
  19. #include "cam_req_mgr_workq.h"
/* Global sync device state; expected to be allocated at driver probe time */
struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
  27. static void cam_sync_print_fence_table(void)
  28. {
  29. int idx;
  30. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  31. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  32. CAM_INFO(CAM_SYNC,
  33. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  34. idx,
  35. sync_dev->sync_table[idx].sync_id,
  36. sync_dev->sync_table[idx].name,
  37. sync_dev->sync_table[idx].type,
  38. sync_dev->sync_table[idx].state,
  39. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  40. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  41. }
  42. }
  43. int cam_sync_create(int32_t *sync_obj, const char *name)
  44. {
  45. int rc;
  46. long idx;
  47. bool bit;
  48. do {
  49. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  50. if (idx >= CAM_SYNC_MAX_OBJS) {
  51. CAM_ERR(CAM_SYNC,
  52. "Error: Unable to create sync idx = %d sync name = %s reached max!",
  53. idx, name);
  54. cam_sync_print_fence_table();
  55. return -ENOMEM;
  56. }
  57. CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
  58. bit = test_and_set_bit(idx, sync_dev->bitmap);
  59. } while (bit);
  60. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  61. rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
  62. CAM_SYNC_TYPE_INDV);
  63. if (rc) {
  64. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  65. idx);
  66. clear_bit(idx, sync_dev->bitmap);
  67. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  68. return -EINVAL;
  69. }
  70. *sync_obj = idx;
  71. CAM_DBG(CAM_SYNC, "sync_obj: %s[%i]", name, *sync_obj);
  72. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  73. return rc;
  74. }
/*
 * Register a kernel callback to be invoked when @sync_obj is signaled.
 *
 * If the object is already signaled (and no children remain pending), the
 * callback is dispatched immediately: either inline in this context
 * (trigger_cb_without_switch) or via the work queue. Otherwise the
 * callback is queued on the row's callback_list for later dispatch.
 *
 * Returns 0 on success, -EINVAL for bad args/uninitialized object,
 * -ENOMEM on allocation failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* GFP_ATOMIC: allocated while holding the row spinlock (BH off) */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
				row->name,
				sync_obj);
			/* Capture state before dropping the lock; the cb is
			 * invoked outside the lock to avoid re-entrancy
			 * deadlocks.
			 */
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			/* Defer the callback to the sync work queue */
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]",
				row->name,
				sync_cb->sync_obj);
			/* Timestamp used to measure workq scheduling delay */
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}

		return 0;
	}

	/* Not yet signaled: park the callback on the row's pending list */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
  136. int cam_sync_deregister_callback(sync_callback cb_func,
  137. void *userdata, int32_t sync_obj)
  138. {
  139. struct sync_table_row *row = NULL;
  140. struct sync_callback_info *sync_cb, *temp;
  141. bool found = false;
  142. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  143. return -EINVAL;
  144. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  145. row = sync_dev->sync_table + sync_obj;
  146. if (row->state == CAM_SYNC_STATE_INVALID) {
  147. CAM_ERR(CAM_SYNC,
  148. "Error: accessing an uninitialized sync obj = %s[%d]",
  149. row->name,
  150. sync_obj);
  151. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  152. return -EINVAL;
  153. }
  154. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
  155. row->name,
  156. sync_obj);
  157. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  158. if (sync_cb->callback_func == cb_func &&
  159. sync_cb->cb_data == userdata) {
  160. list_del_init(&sync_cb->list);
  161. kfree(sync_cb);
  162. found = true;
  163. }
  164. }
  165. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  166. return found ? 0 : -ENOENT;
  167. }
/*
 * Signal an individual sync object with @status and propagate the result
 * to any parent (merged/group) objects.
 *
 * @sync_obj:    object to signal (must be CAM_SYNC_TYPE_INDV and ACTIVE)
 * @status:      one of the CAM_SYNC_STATE_SIGNALED_* states
 * @event_cause: reason code forwarded to dispatched callbacks/events
 *
 * Returns 0 on success (including the refcount-pending case), -EINVAL on
 * bad args/state, -EALREADY if already signaled.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	/* Group objects are signaled implicitly via their children */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %s[%d]",
			row->name,
			sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d event reason = %u",
			status, event_cause);
		return -EINVAL;
	}

	/*
	 * Only the final reference actually signals; earlier callers just
	 * drop their ref and return success.
	 */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* Parent fires only once all of its children have signaled */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
/*
 * Create a group (merged) sync object from @num_objs child objects.
 * The merged object signals once all children have signaled.
 *
 * @sync_obj:   array of child sync object ids (must be unique and valid)
 * @num_objs:   number of entries in @sync_obj; must be > 1
 * @merged_obj: out parameter; receives the new group object's id
 *
 * Returns 0 on success, -EINVAL on bad input, -ENOMEM if the table is full.
 */
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	/* Duplicate children would corrupt the remaining-count bookkeeping */
	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	/* Claim a free slot; retry if another thread races us to it */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  308. int cam_sync_get_obj_ref(int32_t sync_obj)
  309. {
  310. struct sync_table_row *row = NULL;
  311. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  312. return -EINVAL;
  313. row = sync_dev->sync_table + sync_obj;
  314. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  315. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  316. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  317. CAM_ERR(CAM_SYNC,
  318. "Error: accessing an uninitialized sync obj = %s[%d]",
  319. row->name,
  320. sync_obj);
  321. return -EINVAL;
  322. }
  323. atomic_inc(&row->ref_cnt);
  324. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  325. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  326. return 0;
  327. }
  328. int cam_sync_put_obj_ref(int32_t sync_obj)
  329. {
  330. struct sync_table_row *row = NULL;
  331. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  332. return -EINVAL;
  333. row = sync_dev->sync_table + sync_obj;
  334. atomic_dec(&row->ref_cnt);
  335. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  336. return 0;
  337. }
/*
 * Destroy a sync object; row teardown and bitmap release are delegated
 * entirely to cam_sync_deinit_object().
 */
int cam_sync_destroy(int32_t sync_obj)
{
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
  342. int cam_sync_check_valid(int32_t sync_obj)
  343. {
  344. struct sync_table_row *row = NULL;
  345. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  346. return -EINVAL;
  347. row = sync_dev->sync_table + sync_obj;
  348. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  349. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
  350. row->name,
  351. sync_obj);
  352. return -EINVAL;
  353. }
  354. if (row->state == CAM_SYNC_STATE_INVALID) {
  355. CAM_ERR(CAM_SYNC,
  356. "Error: accessing an uninitialized sync obj = %s[%d]",
  357. row->name,
  358. sync_obj);
  359. return -EINVAL;
  360. }
  361. return 0;
  362. }
  363. int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
  364. {
  365. unsigned long timeleft;
  366. int rc = -EINVAL;
  367. struct sync_table_row *row = NULL;
  368. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  369. return -EINVAL;
  370. row = sync_dev->sync_table + sync_obj;
  371. if (row->state == CAM_SYNC_STATE_INVALID) {
  372. CAM_ERR(CAM_SYNC,
  373. "Error: accessing an uninitialized sync obj = %s[%d]",
  374. row->name,
  375. sync_obj);
  376. return -EINVAL;
  377. }
  378. timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
  379. msecs_to_jiffies(timeout_ms));
  380. if (!timeleft) {
  381. CAM_ERR(CAM_SYNC,
  382. "Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
  383. rc = -ETIMEDOUT;
  384. } else {
  385. switch (row->state) {
  386. case CAM_SYNC_STATE_INVALID:
  387. case CAM_SYNC_STATE_ACTIVE:
  388. case CAM_SYNC_STATE_SIGNALED_ERROR:
  389. case CAM_SYNC_STATE_SIGNALED_CANCEL:
  390. CAM_ERR(CAM_SYNC,
  391. "Error: Wait on invalid state = %d, obj = %d, name = %s",
  392. row->state, sync_obj, row->name);
  393. rc = -EINVAL;
  394. break;
  395. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  396. rc = 0;
  397. break;
  398. default:
  399. rc = -EINVAL;
  400. break;
  401. }
  402. }
  403. return rc;
  404. }
  405. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  406. {
  407. struct cam_sync_info sync_create;
  408. int result;
  409. if (k_ioctl->size != sizeof(struct cam_sync_info))
  410. return -EINVAL;
  411. if (!k_ioctl->ioctl_ptr)
  412. return -EINVAL;
  413. if (copy_from_user(&sync_create,
  414. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  415. k_ioctl->size))
  416. return -EFAULT;
  417. sync_create.name[SYNC_DEBUG_NAME_LEN] = '\0';
  418. result = cam_sync_create(&sync_create.sync_obj,
  419. sync_create.name);
  420. if (!result)
  421. if (copy_to_user(
  422. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  423. &sync_create,
  424. k_ioctl->size))
  425. return -EFAULT;
  426. return result;
  427. }
  428. static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
  429. {
  430. int rc = 0;
  431. struct cam_sync_signal sync_signal;
  432. if (k_ioctl->size != sizeof(struct cam_sync_signal))
  433. return -EINVAL;
  434. if (!k_ioctl->ioctl_ptr)
  435. return -EINVAL;
  436. if (copy_from_user(&sync_signal,
  437. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  438. k_ioctl->size))
  439. return -EFAULT;
  440. /* need to get ref for UMD signaled fences */
  441. rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
  442. if (rc) {
  443. CAM_DBG(CAM_SYNC,
  444. "Error: cannot signal an uninitialized sync obj = %d",
  445. sync_signal.sync_obj);
  446. return rc;
  447. }
  448. return cam_sync_signal(sync_signal.sync_obj,
  449. sync_signal.sync_state,
  450. CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
  451. }
  452. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  453. {
  454. struct cam_sync_merge sync_merge;
  455. uint32_t *sync_objs;
  456. uint32_t num_objs;
  457. uint32_t size;
  458. int result;
  459. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  460. return -EINVAL;
  461. if (!k_ioctl->ioctl_ptr)
  462. return -EINVAL;
  463. if (copy_from_user(&sync_merge,
  464. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  465. k_ioctl->size))
  466. return -EFAULT;
  467. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  468. return -EINVAL;
  469. size = sizeof(uint32_t) * sync_merge.num_objs;
  470. sync_objs = kzalloc(size, GFP_ATOMIC);
  471. if (!sync_objs)
  472. return -ENOMEM;
  473. if (copy_from_user(sync_objs,
  474. u64_to_user_ptr(sync_merge.sync_objs),
  475. sizeof(uint32_t) * sync_merge.num_objs)) {
  476. kfree(sync_objs);
  477. return -EFAULT;
  478. }
  479. num_objs = sync_merge.num_objs;
  480. result = cam_sync_merge(sync_objs,
  481. num_objs,
  482. &sync_merge.merged);
  483. if (!result)
  484. if (copy_to_user(
  485. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  486. &sync_merge,
  487. k_ioctl->size)) {
  488. kfree(sync_objs);
  489. return -EFAULT;
  490. }
  491. kfree(sync_objs);
  492. return result;
  493. }
  494. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  495. {
  496. struct cam_sync_wait sync_wait;
  497. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  498. return -EINVAL;
  499. if (!k_ioctl->ioctl_ptr)
  500. return -EINVAL;
  501. if (copy_from_user(&sync_wait,
  502. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  503. k_ioctl->size))
  504. return -EFAULT;
  505. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  506. sync_wait.timeout_ms);
  507. return 0;
  508. }
  509. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  510. {
  511. struct cam_sync_info sync_create;
  512. if (k_ioctl->size != sizeof(struct cam_sync_info))
  513. return -EINVAL;
  514. if (!k_ioctl->ioctl_ptr)
  515. return -EINVAL;
  516. if (copy_from_user(&sync_create,
  517. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  518. k_ioctl->size))
  519. return -EFAULT;
  520. return cam_sync_destroy(sync_create.sync_obj);
  521. }
/*
 * Ioctl handler: attach a userspace payload to a sync object. When the
 * object signals, the payload is echoed back via a v4l2 event. If the
 * object is already signaled, the event is sent immediately instead of
 * queuing the payload. Duplicate payloads (same first two words) are
 * rejected with -EALREADY.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* Allocate and fill the kernel copy before taking the row lock */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the event now, nothing to queue */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {

		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject duplicates, keyed on the first two payload words */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {

			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * Ioctl handler: remove previously registered user payload(s) from a sync
 * object. Matching is by the first two payload words, mirroring the
 * duplicate check in the register path. Returns 0 even when no matching
 * payload is found.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* Remove every payload whose first two words match */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
				userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
				userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
  634. static long cam_sync_dev_ioctl(struct file *filep, void *fh,
  635. bool valid_prio, unsigned int cmd, void *arg)
  636. {
  637. int32_t rc;
  638. struct sync_device *sync_dev = video_drvdata(filep);
  639. struct cam_private_ioctl_arg k_ioctl;
  640. if (!sync_dev) {
  641. CAM_ERR(CAM_SYNC, "sync_dev NULL");
  642. return -EINVAL;
  643. }
  644. if (!arg)
  645. return -EINVAL;
  646. if (cmd != CAM_PRIVATE_IOCTL_CMD)
  647. return -ENOIOCTLCMD;
  648. k_ioctl = *(struct cam_private_ioctl_arg *)arg;
  649. switch (k_ioctl.id) {
  650. case CAM_SYNC_CREATE:
  651. rc = cam_sync_handle_create(&k_ioctl);
  652. break;
  653. case CAM_SYNC_DESTROY:
  654. rc = cam_sync_handle_destroy(&k_ioctl);
  655. break;
  656. case CAM_SYNC_REGISTER_PAYLOAD:
  657. rc = cam_sync_handle_register_user_payload(
  658. &k_ioctl);
  659. break;
  660. case CAM_SYNC_DEREGISTER_PAYLOAD:
  661. rc = cam_sync_handle_deregister_user_payload(
  662. &k_ioctl);
  663. break;
  664. case CAM_SYNC_SIGNAL:
  665. rc = cam_sync_handle_signal(&k_ioctl);
  666. break;
  667. case CAM_SYNC_MERGE:
  668. rc = cam_sync_handle_merge(&k_ioctl);
  669. break;
  670. case CAM_SYNC_WAIT:
  671. rc = cam_sync_handle_wait(&k_ioctl);
  672. ((struct cam_private_ioctl_arg *)arg)->result =
  673. k_ioctl.result;
  674. break;
  675. default:
  676. rc = -ENOIOCTLCMD;
  677. }
  678. return rc;
  679. }
  680. static unsigned int cam_sync_poll(struct file *f,
  681. struct poll_table_struct *pll_table)
  682. {
  683. int rc = 0;
  684. struct v4l2_fh *eventq = f->private_data;
  685. if (!eventq)
  686. return -EINVAL;
  687. poll_wait(f, &eventq->wait, pll_table);
  688. if (v4l2_event_pending(eventq))
  689. rc = POLLPRI;
  690. return rc;
  691. }
  692. static int cam_sync_open(struct file *filep)
  693. {
  694. int rc;
  695. struct sync_device *sync_dev = video_drvdata(filep);
  696. if (!sync_dev) {
  697. CAM_ERR(CAM_SYNC, "Sync device NULL");
  698. return -ENODEV;
  699. }
  700. mutex_lock(&sync_dev->table_lock);
  701. if (sync_dev->open_cnt >= 1) {
  702. mutex_unlock(&sync_dev->table_lock);
  703. return -EALREADY;
  704. }
  705. rc = v4l2_fh_open(filep);
  706. if (!rc) {
  707. sync_dev->open_cnt++;
  708. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  709. sync_dev->cam_sync_eventq = filep->private_data;
  710. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  711. } else {
  712. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  713. }
  714. mutex_unlock(&sync_dev->table_lock);
  715. return rc;
  716. }
/*
 * Device release: on the last close, force-signal all ACTIVE objects as
 * ERROR, flush the callback work queue, then destroy every remaining
 * valid object. Finally detach the event queue and release the v4l2
 * file handle.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Index 0 is never handed out, so start at 1 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
			sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
					  "Cleanup signal fail idx:%d\n",
					  i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
			sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
					  "Cleanup destroy fail:idx:%d\n",
					  i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);

	/* Detach the event queue so no further events are delivered */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
  776. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  777. struct v4l2_event *new)
  778. {
  779. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  780. struct cam_sync_ev_header_v2 *ev_header;
  781. ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
  782. CAM_ERR(CAM_CRM,
  783. "Failed to notify event id %d fence %d statue %d reason %u %u %u %u",
  784. old->id, ev_header->sync_obj, ev_header->status,
  785. ev_header->evt_param[0], ev_header->evt_param[1],
  786. ev_header->evt_param[2], ev_header->evt_param[3]);
  787. } else {
  788. struct cam_sync_ev_header *ev_header;
  789. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  790. CAM_ERR(CAM_CRM,
  791. "Failed to notify event id %d fence %d statue %d",
  792. old->id, ev_header->sync_obj, ev_header->status);
  793. }
  794. }
/* Event ops: log dropped events when the v4l2 event queue merges/overflows */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
  798. int cam_sync_subscribe_event(struct v4l2_fh *fh,
  799. const struct v4l2_event_subscription *sub)
  800. {
  801. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  802. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  803. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  804. return -EINVAL;
  805. }
  806. sync_dev->version = sub->type;
  807. CAM_DBG(CAM_SYNC, "Sync event verion type 0x%x", sync_dev->version);
  808. return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
  809. &cam_sync_v4l2_ops);
  810. }
  811. int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
  812. const struct v4l2_event_subscription *sub)
  813. {
  814. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  815. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  816. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  817. return -EINVAL;
  818. }
  819. return v4l2_event_unsubscribe(fh, sub);
  820. }
/* ioctl dispatch table: private CAM_SYNC ioctls land in cam_sync_dev_ioctl */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
/* File operations for the sync video node; ioctls are routed via video_ioctl2 */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
  836. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  837. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  838. struct platform_device *pdev)
  839. {
  840. int rc;
  841. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  842. GFP_KERNEL);
  843. if (!sync_dev->v4l2_dev.mdev)
  844. return -ENOMEM;
  845. media_device_init(sync_dev->v4l2_dev.mdev);
  846. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  847. sizeof(sync_dev->v4l2_dev.mdev->model));
  848. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  849. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  850. if (rc < 0)
  851. goto register_fail;
  852. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  853. if (rc < 0)
  854. goto entity_fail;
  855. return 0;
  856. entity_fail:
  857. media_device_unregister(sync_dev->v4l2_dev.mdev);
  858. register_fail:
  859. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  860. return rc;
  861. }
  862. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  863. {
  864. media_entity_cleanup(&sync_dev->vdev->entity);
  865. media_device_unregister(sync_dev->v4l2_dev.mdev);
  866. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  867. kfree(sync_dev->v4l2_dev.mdev);
  868. }
  869. static void cam_sync_init_entity(struct sync_device *sync_dev)
  870. {
  871. sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
  872. sync_dev->vdev->entity.name =
  873. video_device_node_name(sync_dev->vdev);
  874. }
  875. #else
  876. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  877. struct platform_device *pdev)
  878. {
  879. return 0;
  880. }
  881. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  882. {
  883. }
  884. static void cam_sync_init_entity(struct sync_device *sync_dev)
  885. {
  886. }
  887. #endif
  888. static int cam_sync_create_debugfs(void)
  889. {
  890. int rc = 0;
  891. struct dentry *dbgfileptr = NULL;
  892. if (!cam_debugfs_available())
  893. return 0;
  894. rc = cam_debugfs_create_subdir("sync", &dbgfileptr);
  895. if (rc) {
  896. CAM_ERR(CAM_SYNC,"DebugFS could not create directory!");
  897. rc = -ENOENT;
  898. goto end;
  899. }
  900. /* Store parent inode for cleanup in caller */
  901. sync_dev->dentry = dbgfileptr;
  902. debugfs_create_bool("trigger_cb_without_switch", 0644,
  903. sync_dev->dentry, &trigger_cb_without_switch);
  904. end:
  905. return rc;
  906. }
  907. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  908. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  909. {
  910. int rc = 0;
  911. uint32_t sync_status = synx_status;
  912. switch (synx_status) {
  913. case SYNX_STATE_ACTIVE:
  914. sync_status = CAM_SYNC_STATE_ACTIVE;
  915. break;
  916. case SYNX_STATE_SIGNALED_SUCCESS:
  917. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  918. break;
  919. case SYNX_STATE_SIGNALED_ERROR:
  920. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  921. break;
  922. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  923. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  924. break;
  925. default:
  926. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  927. synx_status, sync_obj);
  928. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  929. break;
  930. }
  931. rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
  932. if (rc) {
  933. CAM_ERR(CAM_SYNC,
  934. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  935. sync_obj, synx_status, sync_status, rc);
  936. }
  937. return rc;
  938. }
  939. static int cam_sync_register_synx_bind_ops(
  940. struct synx_register_params *object)
  941. {
  942. int rc = 0;
  943. rc = synx_register_ops(object);
  944. if (rc)
  945. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  946. return rc;
  947. }
  948. static void cam_sync_unregister_synx_bind_ops(
  949. struct synx_register_params *object)
  950. {
  951. int rc = 0;
  952. rc = synx_deregister_ops(object);
  953. if (rc)
  954. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  955. }
  956. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  957. {
  958. struct synx_register_params *params = object;
  959. params->name = CAM_SYNC_NAME;
  960. params->type = SYNX_TYPE_CSL;
  961. params->ops.register_callback = cam_sync_register_callback;
  962. params->ops.deregister_callback = cam_sync_deregister_callback;
  963. params->ops.enable_signaling = cam_sync_get_obj_ref;
  964. params->ops.signal = cam_synx_sync_signal;
  965. }
  966. #endif
/*
 * Component bind: allocate the global sync_dev, register the v4l2 video
 * device that userspace talks to, zero the sync table/bitmap, create the
 * high-priority work queue and debugfs entries, and (optionally) register
 * with the synx driver. Returns 0 on success, negative errno on failure.
 */
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	/* One spinlock per sync-table row for fine-grained locking */
	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;
	rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	/*
	 * NOTE(review): this jump to v4l2_fail leaves the work queue created
	 * above undestroyed — looks like a leak on this path; verify.
	 */
	if (rc)
		goto v4l2_fail;
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

	/*
	 * Error unwind: labels intentionally fall through from the deepest
	 * failure point.
	 * NOTE(review): the register_fail/mcinit_fail paths reach
	 * video_unregister_device() even though the device was never
	 * registered on those paths — confirm this is safe/intended.
	 */
v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
  1048. static void cam_sync_component_unbind(struct device *dev,
  1049. struct device *master_dev, void *data)
  1050. {
  1051. int i;
  1052. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  1053. cam_sync_media_controller_cleanup(sync_dev);
  1054. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  1055. cam_sync_unregister_synx_bind_ops(&sync_dev->params);
  1056. #endif
  1057. video_unregister_device(sync_dev->vdev);
  1058. video_device_release(sync_dev->vdev);
  1059. sync_dev->dentry = NULL;
  1060. for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
  1061. spin_lock_init(&sync_dev->row_spinlocks[i]);
  1062. kfree(sync_dev);
  1063. sync_dev = NULL;
  1064. }
  1065. const static struct component_ops cam_sync_component_ops = {
  1066. .bind = cam_sync_component_bind,
  1067. .unbind = cam_sync_component_unbind,
  1068. };
  1069. static int cam_sync_probe(struct platform_device *pdev)
  1070. {
  1071. int rc = 0;
  1072. CAM_DBG(CAM_SYNC, "Adding Sync component");
  1073. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  1074. if (rc)
  1075. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  1076. return rc;
  1077. }
/* Platform remove: detach the sync component (unbind does the teardown) */
static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}
/* Device-tree match table: binds this driver to "qcom,cam-sync" nodes */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver definition; suppress_bind_attrs prevents manual
 * bind/unbind via sysfs.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module-level init hook: register the platform driver */
int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}
/* Module-level exit hook: unregister the platform driver */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");