cam_sync.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "camera_main.h"
  18. #include "cam_req_mgr_workq.h"
/* Global sync device context, allocated and initialized at probe time */
struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
  26. static void cam_sync_print_fence_table(void)
  27. {
  28. int idx;
  29. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  30. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  31. CAM_INFO(CAM_SYNC,
  32. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  33. idx,
  34. sync_dev->sync_table[idx].sync_id,
  35. sync_dev->sync_table[idx].name,
  36. sync_dev->sync_table[idx].type,
  37. sync_dev->sync_table[idx].state,
  38. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  39. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  40. }
  41. }
/*
 * cam_sync_create() - Allocate a fresh individual (non-group) sync object.
 * @sync_obj: out parameter; receives the new object's table index on success
 * @name:     human-readable name stored in the table row
 *
 * Returns 0 on success, -ENOMEM when the table is full, -EINVAL when row
 * initialization fails.
 */
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	/*
	 * Claim a free slot: find_first_zero_bit() is not atomic with
	 * test_and_set_bit(), so retry if another creator raced us to the
	 * same index.
	 */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %d reached max!",
				idx);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		/* Release the bitmap slot claimed above before bailing */
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return rc;
}
/*
 * cam_sync_register_callback() - Register a kernel callback on a sync object.
 * @cb_func:  callback invoked when the object signals
 * @userdata: opaque cookie passed back to @cb_func
 * @sync_obj: object to watch
 *
 * If the object is already signaled (and, for groups, no children remain),
 * the callback is fired immediately: either inline in this context
 * (trigger_cb_without_switch) or bounced through the work queue. Otherwise
 * the registration is queued on the row's callback_list for dispatch at
 * signal time. Returns 0 on success, -EINVAL on bad arguments or an
 * uninitialized object, -ENOMEM on allocation failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* GFP_ATOMIC: allocated while holding the row spinlock */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			/* Snapshot state, then drop the lock BEFORE calling
			 * out so the callback may re-enter the sync APIs.
			 */
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			/* Defer to the workq so the caller's context is not
			 * borrowed for the callback.
			 */
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}

		return 0;
	}

	/* Not yet signaled: park the registration on the row */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
  132. int cam_sync_deregister_callback(sync_callback cb_func,
  133. void *userdata, int32_t sync_obj)
  134. {
  135. struct sync_table_row *row = NULL;
  136. struct sync_callback_info *sync_cb, *temp;
  137. bool found = false;
  138. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  139. return -EINVAL;
  140. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  141. row = sync_dev->sync_table + sync_obj;
  142. if (row->state == CAM_SYNC_STATE_INVALID) {
  143. CAM_ERR(CAM_SYNC,
  144. "Error: accessing an uninitialized sync obj = %d",
  145. sync_obj);
  146. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  147. return -EINVAL;
  148. }
  149. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
  150. sync_obj);
  151. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  152. if (sync_cb->callback_func == cb_func &&
  153. sync_cb->cb_data == userdata) {
  154. list_del_init(&sync_cb->list);
  155. kfree(sync_cb);
  156. found = true;
  157. }
  158. }
  159. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  160. return found ? 0 : -ENOENT;
  161. }
/*
 * cam_sync_signal() - Signal an individual sync object and propagate to
 * any group (merged) parents.
 * @sync_obj:    object to signal (must be CAM_SYNC_TYPE_INDV and ACTIVE)
 * @status:      one of the SIGNALED_* terminal states
 * @event_cause: reason code forwarded to registered payloads/callbacks
 *
 * Consumes one reference on the row via atomic_dec_and_test(); the object
 * only transitions and dispatches callbacks when the count reaches zero.
 * Parent propagation is done AFTER dropping the child's lock: the parent
 * list is spliced to a local list first so child and parent row locks are
 * never held simultaneously (avoids lock-ordering deadlock).
 *
 * Returns 0 on success (including the "refs still outstanding" case),
 * -EINVAL on bad object/status, -EALREADY if already signaled.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	/* Group objects signal implicitly via their children, never directly */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d event reason = %u",
			status, event_cause);
		return -EINVAL;
	}

	/*
	 * Each holder's signal just drops a ref; only the last one performs
	 * the actual state transition and callback dispatch.
	 */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* Parent is fully resolved once all children have signaled */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
/*
 * cam_sync_merge() - Create a GROUP sync object that signals when all of
 * the supplied child objects have signaled.
 * @sync_obj:   array of child object ids (deduplicated in place;
 *              duplicates cause rejection)
 * @num_objs:   number of entries in @sync_obj; must be > 1
 * @merged_obj: out parameter; receives the new group object's index
 *
 * Returns 0 on success, -EINVAL on bad input or row-init failure,
 * -ENOMEM when the table is full, or the error from validity checking.
 */
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	/* Reject lists containing the same fence twice */
	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	/*
	 * Validate each child up front. NOTE(review): done without holding
	 * the row locks, so a child could be destroyed between this check
	 * and group init — presumably guarded by caller lifetime rules.
	 */
	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	/* Claim a free table slot; retry on allocation races (see create) */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  299. int cam_sync_get_obj_ref(int32_t sync_obj)
  300. {
  301. struct sync_table_row *row = NULL;
  302. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  303. return -EINVAL;
  304. row = sync_dev->sync_table + sync_obj;
  305. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  306. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  307. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  308. CAM_ERR(CAM_SYNC,
  309. "Error: accessing an uninitialized sync obj = %d",
  310. sync_obj);
  311. return -EINVAL;
  312. }
  313. atomic_inc(&row->ref_cnt);
  314. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  315. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  316. return 0;
  317. }
  318. int cam_sync_put_obj_ref(int32_t sync_obj)
  319. {
  320. struct sync_table_row *row = NULL;
  321. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  322. return -EINVAL;
  323. row = sync_dev->sync_table + sync_obj;
  324. atomic_dec(&row->ref_cnt);
  325. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  326. return 0;
  327. }
  328. int cam_sync_destroy(int32_t sync_obj)
  329. {
  330. CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
  331. return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
  332. }
  333. int cam_sync_check_valid(int32_t sync_obj)
  334. {
  335. struct sync_table_row *row = NULL;
  336. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  337. return -EINVAL;
  338. row = sync_dev->sync_table + sync_obj;
  339. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  340. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %d",
  341. sync_obj);
  342. return -EINVAL;
  343. }
  344. if (row->state == CAM_SYNC_STATE_INVALID) {
  345. CAM_ERR(CAM_SYNC,
  346. "Error: accessing an uninitialized sync obj = %d",
  347. sync_obj);
  348. return -EINVAL;
  349. }
  350. return 0;
  351. }
/*
 * cam_sync_wait() - Block until a sync object signals or the timeout fires.
 * @sync_obj:   object to wait on
 * @timeout_ms: timeout in milliseconds
 *
 * Returns 0 when the object signaled SUCCESS, -ETIMEDOUT on timeout, and
 * -EINVAL for bad objects or any other terminal state (ERROR / CANCEL).
 *
 * NOTE(review): row->state is read here without taking the row spinlock;
 * presumably the completion acts as the ordering point — confirm against
 * cam_sync_util's signaling path.
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));

	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		/* Completion fired: map the terminal state to a result */
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
  393. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  394. {
  395. struct cam_sync_info sync_create;
  396. int result;
  397. if (k_ioctl->size != sizeof(struct cam_sync_info))
  398. return -EINVAL;
  399. if (!k_ioctl->ioctl_ptr)
  400. return -EINVAL;
  401. if (copy_from_user(&sync_create,
  402. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  403. k_ioctl->size))
  404. return -EFAULT;
  405. result = cam_sync_create(&sync_create.sync_obj,
  406. sync_create.name);
  407. if (!result)
  408. if (copy_to_user(
  409. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  410. &sync_create,
  411. k_ioctl->size))
  412. return -EFAULT;
  413. return result;
  414. }
  415. static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
  416. {
  417. int rc = 0;
  418. struct cam_sync_signal sync_signal;
  419. if (k_ioctl->size != sizeof(struct cam_sync_signal))
  420. return -EINVAL;
  421. if (!k_ioctl->ioctl_ptr)
  422. return -EINVAL;
  423. if (copy_from_user(&sync_signal,
  424. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  425. k_ioctl->size))
  426. return -EFAULT;
  427. /* need to get ref for UMD signaled fences */
  428. rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
  429. if (rc) {
  430. CAM_DBG(CAM_SYNC,
  431. "Error: cannot signal an uninitialized sync obj = %d",
  432. sync_signal.sync_obj);
  433. return rc;
  434. }
  435. return cam_sync_signal(sync_signal.sync_obj,
  436. sync_signal.sync_state,
  437. CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
  438. }
  439. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  440. {
  441. struct cam_sync_merge sync_merge;
  442. uint32_t *sync_objs;
  443. uint32_t num_objs;
  444. uint32_t size;
  445. int result;
  446. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  447. return -EINVAL;
  448. if (!k_ioctl->ioctl_ptr)
  449. return -EINVAL;
  450. if (copy_from_user(&sync_merge,
  451. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  452. k_ioctl->size))
  453. return -EFAULT;
  454. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  455. return -EINVAL;
  456. size = sizeof(uint32_t) * sync_merge.num_objs;
  457. sync_objs = kzalloc(size, GFP_ATOMIC);
  458. if (!sync_objs)
  459. return -ENOMEM;
  460. if (copy_from_user(sync_objs,
  461. u64_to_user_ptr(sync_merge.sync_objs),
  462. sizeof(uint32_t) * sync_merge.num_objs)) {
  463. kfree(sync_objs);
  464. return -EFAULT;
  465. }
  466. num_objs = sync_merge.num_objs;
  467. result = cam_sync_merge(sync_objs,
  468. num_objs,
  469. &sync_merge.merged);
  470. if (!result)
  471. if (copy_to_user(
  472. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  473. &sync_merge,
  474. k_ioctl->size)) {
  475. kfree(sync_objs);
  476. return -EFAULT;
  477. }
  478. kfree(sync_objs);
  479. return result;
  480. }
  481. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  482. {
  483. struct cam_sync_wait sync_wait;
  484. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  485. return -EINVAL;
  486. if (!k_ioctl->ioctl_ptr)
  487. return -EINVAL;
  488. if (copy_from_user(&sync_wait,
  489. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  490. k_ioctl->size))
  491. return -EFAULT;
  492. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  493. sync_wait.timeout_ms);
  494. return 0;
  495. }
  496. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  497. {
  498. struct cam_sync_info sync_create;
  499. if (k_ioctl->size != sizeof(struct cam_sync_info))
  500. return -EINVAL;
  501. if (!k_ioctl->ioctl_ptr)
  502. return -EINVAL;
  503. if (copy_from_user(&sync_create,
  504. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  505. k_ioctl->size))
  506. return -EFAULT;
  507. return cam_sync_destroy(sync_create.sync_obj);
  508. }
/*
 * cam_sync_handle_register_user_payload() - Ioctl handler for
 * CAM_SYNC_REGISTER_PAYLOAD.
 * @k_ioctl: kernel copy of the private ioctl argument
 *
 * Registers an opaque userspace payload to be delivered (as a v4l2 event)
 * when the sync object signals. If the object has already signaled, the
 * event is sent immediately. Duplicate registrations (same first two
 * payload words) are rejected with -EALREADY.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* Allocate before taking the row lock so GFP_KERNEL is safe */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the event now instead of queueing */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {

		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/*
	 * Reject a duplicate: the first two payload words act as the
	 * identity of a registration.
	 */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {

			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * cam_sync_handle_deregister_user_payload() - Ioctl handler for
 * CAM_SYNC_DEREGISTER_PAYLOAD.
 * @k_ioctl: kernel copy of the private ioctl argument
 *
 * Removes any queued user payload whose first two words match the one
 * supplied. Succeeds (returns 0) even if no matching payload was found.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* Match on the first two payload words (registration identity) */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
				userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
				userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * cam_sync_dev_ioctl() - vidioc_default dispatcher for the private
 * CAM_PRIVATE_IOCTL_CMD command.
 * @filep:      v4l2 file handle
 * @fh:         v4l2 fh (unused)
 * @valid_prio: v4l2 priority flag (unused)
 * @cmd:        ioctl command; only CAM_PRIVATE_IOCTL_CMD is accepted
 * @arg:        pointer to a struct cam_private_ioctl_arg (already in
 *              kernel space, copied in by the v4l2 core)
 *
 * Dispatches on k_ioctl.id to the per-operation handlers. Only the WAIT
 * path writes a result back into the caller's argument.
 */
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	/* Work on a local copy; only WAIT propagates anything back */
	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
/*
 * cam_sync_poll() - poll/select support; readiness means a pending v4l2
 * event on the file handle's event queue.
 *
 * Returns POLLPRI when an event is pending, 0 otherwise.
 * NOTE(review): returns -EINVAL through an unsigned poll-mask return
 * type when eventq is NULL; callers see a large mask rather than an
 * errno — confirm whether POLLERR was intended.
 */
static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
/*
 * cam_sync_open() - File-open handler; enforces single-open semantics.
 *
 * Only one userspace client may hold the sync device at a time
 * (-EALREADY otherwise). On success, the v4l2 fh becomes the event queue
 * used for payload delivery.
 */
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	/* Single-open device: reject a second concurrent opener */
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
/*
 * cam_sync_close() - File-release handler; tears down all outstanding
 * sync objects when the last opener goes away.
 *
 * Teardown is two-pass on purpose: first signal every ACTIVE object as
 * ERROR, then flush the work queue so all callback workers finish, and
 * only then destroy the rows — destroying before the flush could free
 * rows a worker is still touching.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Pass 1: force-signal every still-active object */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);

	/* Detach the event queue so no further events are delivered */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
  761. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  762. struct v4l2_event *new)
  763. {
  764. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  765. struct cam_sync_ev_header_v2 *ev_header;
  766. ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
  767. CAM_ERR(CAM_CRM,
  768. "Failed to notify event id %d fence %d statue %d reason %u %u %u %u",
  769. old->id, ev_header->sync_obj, ev_header->status,
  770. ev_header->evt_param[0], ev_header->evt_param[1],
  771. ev_header->evt_param[2], ev_header->evt_param[3]);
  772. } else {
  773. struct cam_sync_ev_header *ev_header;
  774. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  775. CAM_ERR(CAM_CRM,
  776. "Failed to notify event id %d fence %d statue %d",
  777. old->id, ev_header->sync_obj, ev_header->status);
  778. }
  779. }
/*
 * .merge is called by the v4l2 event core when the subscriber queue is
 * full and the oldest event must be folded into the newest; here it is
 * used only to log the drop.
 */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
  783. int cam_sync_subscribe_event(struct v4l2_fh *fh,
  784. const struct v4l2_event_subscription *sub)
  785. {
  786. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  787. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  788. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  789. return -EINVAL;
  790. }
  791. sync_dev->version = sub->type;
  792. CAM_DBG(CAM_SYNC, "Sync event verion type 0x%x", sync_dev->version);
  793. return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
  794. &cam_sync_v4l2_ops);
  795. }
  796. int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
  797. const struct v4l2_event_subscription *sub)
  798. {
  799. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  800. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  801. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  802. return -EINVAL;
  803. }
  804. return v4l2_event_unsubscribe(fh, sub);
  805. }
/*
 * Ioctl dispatch for the sync video node: event (un)subscription plus
 * the private cam_sync ioctls routed through cam_sync_dev_ioctl().
 */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
.vidioc_subscribe_event = cam_sync_subscribe_event,
.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
.vidioc_default = cam_sync_dev_ioctl,
};
  811. static struct v4l2_file_operations cam_sync_v4l2_fops = {
  812. .owner = THIS_MODULE,
  813. .open = cam_sync_open,
  814. .release = cam_sync_close,
  815. .poll = cam_sync_poll,
  816. .unlocked_ioctl = video_ioctl2,
  817. #ifdef CONFIG_COMPAT
  818. .compat_ioctl32 = video_ioctl2,
  819. #endif
  820. };
  821. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  822. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  823. struct platform_device *pdev)
  824. {
  825. int rc;
  826. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  827. GFP_KERNEL);
  828. if (!sync_dev->v4l2_dev.mdev)
  829. return -ENOMEM;
  830. media_device_init(sync_dev->v4l2_dev.mdev);
  831. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  832. sizeof(sync_dev->v4l2_dev.mdev->model));
  833. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  834. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  835. if (rc < 0)
  836. goto register_fail;
  837. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  838. if (rc < 0)
  839. goto entity_fail;
  840. return 0;
  841. entity_fail:
  842. media_device_unregister(sync_dev->v4l2_dev.mdev);
  843. register_fail:
  844. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  845. return rc;
  846. }
/*
 * Undo cam_sync_media_controller_init(): tear down the vdev entity,
 * then unregister, clean up and free the media device, in that order.
 */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
media_entity_cleanup(&sync_dev->vdev->entity);
media_device_unregister(sync_dev->v4l2_dev.mdev);
media_device_cleanup(sync_dev->v4l2_dev.mdev);
kfree(sync_dev->v4l2_dev.mdev);
}
/* Tag the video device entity with the cam_sync function and node name */
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
sync_dev->vdev->entity.name =
video_device_node_name(sync_dev->vdev);
}
  860. #else
/* Stub when CONFIG_MEDIA_CONTROLLER is not reachable: report success */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
struct platform_device *pdev)
{
return 0;
}
/* Stub when CONFIG_MEDIA_CONTROLLER is not reachable: nothing to clean up */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}
/* Stub when CONFIG_MEDIA_CONTROLLER is not reachable: no entity to set up */
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
  872. #endif
  873. static int cam_sync_create_debugfs(void)
  874. {
  875. int rc = 0;
  876. struct dentry *dbgfileptr = NULL;
  877. dbgfileptr = debugfs_create_dir("camera_sync", NULL);
  878. if (!dbgfileptr) {
  879. CAM_ERR(CAM_SYNC,"DebugFS could not create directory!");
  880. rc = -ENOENT;
  881. goto end;
  882. }
  883. /* Store parent inode for cleanup in caller */
  884. sync_dev->dentry = dbgfileptr;
  885. dbgfileptr = debugfs_create_bool("trigger_cb_without_switch", 0644,
  886. sync_dev->dentry, &trigger_cb_without_switch);
  887. if (IS_ERR(dbgfileptr)) {
  888. if (PTR_ERR(dbgfileptr) == -ENODEV)
  889. CAM_WARN(CAM_SYNC, "DebugFS not enabled in kernel!");
  890. else
  891. rc = PTR_ERR(dbgfileptr);
  892. }
  893. end:
  894. return rc;
  895. }
  896. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  897. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  898. {
  899. int rc = 0;
  900. uint32_t sync_status = synx_status;
  901. switch (synx_status) {
  902. case SYNX_STATE_ACTIVE:
  903. sync_status = CAM_SYNC_STATE_ACTIVE;
  904. break;
  905. case SYNX_STATE_SIGNALED_SUCCESS:
  906. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  907. break;
  908. case SYNX_STATE_SIGNALED_ERROR:
  909. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  910. break;
  911. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  912. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  913. break;
  914. default:
  915. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  916. synx_status, sync_obj);
  917. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  918. break;
  919. }
  920. rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
  921. if (rc) {
  922. CAM_ERR(CAM_SYNC,
  923. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  924. sync_obj, synx_status, sync_status, rc);
  925. }
  926. return rc;
  927. }
  928. static int cam_sync_register_synx_bind_ops(
  929. struct synx_register_params *object)
  930. {
  931. int rc = 0;
  932. rc = synx_register_ops(object);
  933. if (rc)
  934. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  935. return rc;
  936. }
  937. static void cam_sync_unregister_synx_bind_ops(
  938. struct synx_register_params *object)
  939. {
  940. int rc = 0;
  941. rc = synx_deregister_ops(object);
  942. if (rc)
  943. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  944. }
  945. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  946. {
  947. struct synx_register_params *params = object;
  948. params->name = CAM_SYNC_NAME;
  949. params->type = SYNX_TYPE_CSL;
  950. params->ops.register_callback = cam_sync_register_callback;
  951. params->ops.deregister_callback = cam_sync_deregister_callback;
  952. params->ops.enable_signaling = cam_sync_get_obj_ref;
  953. params->ops.signal = cam_synx_sync_signal;
  954. }
  955. #endif
/*
 * Component bind: allocate and initialize the global sync_dev -- locks,
 * the V4L2 video device (plus optional media controller), the sync
 * object table and bitmap, the high-priority work queue, debugfs, and
 * (when synx is reachable) the synx bind ops.
 * Returns 0 on success or a negative errno; partial initialization is
 * unwound through the goto labels at the bottom.
 */
static int cam_sync_component_bind(struct device *dev,
struct device *master_dev, void *data)
{
int rc;
int idx;
struct platform_device *pdev = to_platform_device(dev);
sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
if (!sync_dev)
return -ENOMEM;
mutex_init(&sync_dev->table_lock);
spin_lock_init(&sync_dev->cam_sync_eventq_lock);
/* One spinlock per sync-object row in the table */
for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
spin_lock_init(&sync_dev->row_spinlocks[idx]);
sync_dev->vdev = video_device_alloc();
if (!sync_dev->vdev) {
rc = -ENOMEM;
goto vdev_fail;
}
rc = cam_sync_media_controller_init(sync_dev, pdev);
if (rc < 0)
goto mcinit_fail;
sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
if (rc < 0)
goto register_fail;
strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
sizeof(sync_dev->vdev->name));
sync_dev->vdev->release = video_device_release_empty;
sync_dev->vdev->fops = &cam_sync_v4l2_fops;
sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
/* minor = -1: let the V4L2 core pick the device minor */
sync_dev->vdev->minor = -1;
sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;
rc = video_register_device(sync_dev->vdev,
VFL_TYPE_GRABBER, -1);
if (rc < 0) {
CAM_ERR(CAM_SYNC,
"video device registration failure rc = %d, name = %s, device_caps = %d",
rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
goto v4l2_fail;
}
cam_sync_init_entity(sync_dev);
video_set_drvdata(sync_dev->vdev, sync_dev);
memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
/*
 * We treat zero as invalid handle, so we will keep the 0th bit set
 * always
 */
set_bit(0, sync_dev->bitmap);
sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
WQ_HIGHPRI | WQ_UNBOUND, 1);
if (!sync_dev->work_queue) {
CAM_ERR(CAM_SYNC,
"Error: high priority work queue creation failed");
rc = -ENOMEM;
goto v4l2_fail;
}
trigger_cb_without_switch = false;
/* Best-effort: debugfs failure is not fatal, return value ignored */
cam_sync_create_debugfs();
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
CAM_DBG(CAM_SYNC, "Registering with synx driver");
cam_sync_configure_synx_obj(&sync_dev->params);
rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
if (rc)
goto v4l2_fail;
#endif
CAM_DBG(CAM_SYNC, "Component bound successfully");
return rc;
/* Error unwind: each label falls through to the ones below it */
v4l2_fail:
v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
cam_sync_media_controller_cleanup(sync_dev);
/*
 * NOTE(review): on the mcinit_fail path the vdev was never registered;
 * video_unregister_device() tolerates that, but confirm no media
 * controller state leaks when init itself failed.
 */
mcinit_fail:
video_unregister_device(sync_dev->vdev);
video_device_release(sync_dev->vdev);
vdev_fail:
mutex_destroy(&sync_dev->table_lock);
kfree(sync_dev);
return rc;
}
/*
 * Component unbind: tear down everything set up in
 * cam_sync_component_bind() and free the global sync_dev.
 * NOTE(review): the work_queue allocated in bind is not destroyed here
 * -- confirm destroy_workqueue() is handled elsewhere (e.g. on close).
 */
static void cam_sync_component_unbind(struct device *dev,
struct device *master_dev, void *data)
{
int i;
v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
cam_sync_media_controller_cleanup(sync_dev);
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
video_unregister_device(sync_dev->vdev);
video_device_release(sync_dev->vdev);
debugfs_remove_recursive(sync_dev->dentry);
sync_dev->dentry = NULL;
/* Re-initializing the row locks right before kfree() has no lasting effect */
for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
spin_lock_init(&sync_dev->row_spinlocks[i]);
kfree(sync_dev);
sync_dev = NULL;
}
  1056. const static struct component_ops cam_sync_component_ops = {
  1057. .bind = cam_sync_component_bind,
  1058. .unbind = cam_sync_component_unbind,
  1059. };
  1060. static int cam_sync_probe(struct platform_device *pdev)
  1061. {
  1062. int rc = 0;
  1063. CAM_DBG(CAM_SYNC, "Adding Sync component");
  1064. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  1065. if (rc)
  1066. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  1067. return rc;
  1068. }
/* Platform remove: detach this device from the component framework */
static int cam_sync_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &cam_sync_component_ops);
return 0;
}
/* OF match table: binds this driver to the "qcom,cam-sync" DT node */
static const struct of_device_id cam_sync_dt_match[] = {
{.compatible = "qcom,cam-sync"},
{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver definition.  NOTE(review): .owner is also set by the
 * platform_driver_register() wrapper, so the explicit assignment here
 * is redundant but harmless.
 */
struct platform_driver cam_sync_driver = {
.probe = cam_sync_probe,
.remove = cam_sync_remove,
.driver = {
.name = "cam_sync",
.owner = THIS_MODULE,
.of_match_table = cam_sync_dt_match,
.suppress_bind_attrs = true,
},
};
/* Module entry point: register the cam_sync platform driver */
int cam_sync_init(void)
{
return platform_driver_register(&cam_sync_driver);
}
/* Module exit point: unregister the cam_sync platform driver */
void cam_sync_exit(void)
{
platform_driver_unregister(&cam_sync_driver);
}
/* Module metadata */
MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");