  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "camera_main.h"
  18. struct sync_device *sync_dev;
  19. /*
  20. * Flag to determine whether to enqueue cb of a
  21. * signaled fence onto the workq or invoke it
  22. * directly in the same context
  23. */
  24. static bool trigger_cb_without_switch;
  25. static void cam_sync_print_fence_table(void)
  26. {
  27. int idx;
  28. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  29. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  30. CAM_INFO(CAM_SYNC,
  31. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  32. idx,
  33. sync_dev->sync_table[idx].sync_id,
  34. sync_dev->sync_table[idx].name,
  35. sync_dev->sync_table[idx].type,
  36. sync_dev->sync_table[idx].state,
  37. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  38. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  39. }
  40. }
  41. int cam_sync_create(int32_t *sync_obj, const char *name)
  42. {
  43. int rc;
  44. long idx;
  45. bool bit;
  46. do {
  47. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  48. if (idx >= CAM_SYNC_MAX_OBJS) {
  49. CAM_ERR(CAM_SYNC,
  50. "Error: Unable to create sync idx = %d reached max!",
  51. idx);
  52. cam_sync_print_fence_table();
  53. return -ENOMEM;
  54. }
  55. CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
  56. bit = test_and_set_bit(idx, sync_dev->bitmap);
  57. } while (bit);
  58. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  59. rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
  60. CAM_SYNC_TYPE_INDV);
  61. if (rc) {
  62. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  63. idx);
  64. clear_bit(idx, sync_dev->bitmap);
  65. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  66. return -EINVAL;
  67. }
  68. *sync_obj = idx;
  69. CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
  70. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  71. return rc;
  72. }
/*
 * Register a kernel callback to be invoked when @sync_obj signals.
 *
 * If the object is already signaled (and no children remain pending),
 * the callback fires immediately: inline when trigger_cb_without_switch
 * is set, otherwise via the workqueue. Otherwise the callback is queued
 * on the row's callback_list for cam_sync_signal() to dispatch.
 *
 * Returns 0 on success, -EINVAL on bad handle/state, -ENOMEM on alloc
 * failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	/* Valid handles are 1..CAM_SYNC_MAX_OBJS-1; cb_func is mandatory. */
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* GFP_ATOMIC: we are under a BH-disabled spinlock here. */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			/*
			 * Snapshot the state and drop the row lock before
			 * calling out to cb_func.
			 */
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			/* Defer the invocation to the sync workqueue. */
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}

		return 0;
	}

	/* Not yet signaled: park the callback on the row for later. */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
  130. int cam_sync_deregister_callback(sync_callback cb_func,
  131. void *userdata, int32_t sync_obj)
  132. {
  133. struct sync_table_row *row = NULL;
  134. struct sync_callback_info *sync_cb, *temp;
  135. bool found = false;
  136. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  137. return -EINVAL;
  138. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  139. row = sync_dev->sync_table + sync_obj;
  140. if (row->state == CAM_SYNC_STATE_INVALID) {
  141. CAM_ERR(CAM_SYNC,
  142. "Error: accessing an uninitialized sync obj = %d",
  143. sync_obj);
  144. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  145. return -EINVAL;
  146. }
  147. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
  148. sync_obj);
  149. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  150. if (sync_cb->callback_func == cb_func &&
  151. sync_cb->cb_data == userdata) {
  152. list_del_init(&sync_cb->list);
  153. kfree(sync_cb);
  154. found = true;
  155. }
  156. }
  157. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  158. return found ? 0 : -ENOENT;
  159. }
/*
 * Signal an individual sync object with @status and propagate the
 * result to any parent (merged/group) objects.
 *
 * Locking: the child's row lock is released before any parent row lock
 * is taken; the parents list is spliced to a local list first so the
 * traversal never holds two row locks at once.
 *
 * Returns 0 on success; -EINVAL on bad handle/type/status,
 * -EALREADY if the object is not in the ACTIVE state.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	/* Group objects are signaled by their children, never directly. */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	/* Only the three terminal SIGNALED states are acceptable. */
	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}

	/*
	 * Outstanding references defer the signal; only the final
	 * ref-drop performs the actual state transition and dispatch.
	 */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* Parent fires once its last pending child has signaled. */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
/*
 * Merge @num_objs individual sync objects into one GROUP object that
 * signals when all children have signaled.
 *
 * @sync_obj:   array of child handles (must be duplicate-free)
 * @num_objs:   number of entries in @sync_obj, must be > 1
 * @merged_obj: out parameter; receives the group object's handle
 *
 * Returns 0 on success, -EINVAL on bad arguments or init failure,
 * -ENOMEM when the table is full.
 */
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	/* Duplicate children would corrupt the remaining-count logic. */
	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	/* Every child must be a live, initialized object. */
	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	/* Claim a free slot (same claim loop as cam_sync_create). */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  296. int cam_sync_get_obj_ref(int32_t sync_obj)
  297. {
  298. struct sync_table_row *row = NULL;
  299. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  300. return -EINVAL;
  301. row = sync_dev->sync_table + sync_obj;
  302. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  303. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  304. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  305. CAM_ERR(CAM_SYNC,
  306. "Error: accessing an uninitialized sync obj = %d",
  307. sync_obj);
  308. return -EINVAL;
  309. }
  310. atomic_inc(&row->ref_cnt);
  311. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  312. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  313. return 0;
  314. }
  315. int cam_sync_put_obj_ref(int32_t sync_obj)
  316. {
  317. struct sync_table_row *row = NULL;
  318. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  319. return -EINVAL;
  320. row = sync_dev->sync_table + sync_obj;
  321. atomic_dec(&row->ref_cnt);
  322. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  323. return 0;
  324. }
/*
 * Destroy a sync object. Thin wrapper: range/state validation is
 * performed inside cam_sync_deinit_object().
 */
int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
  330. int cam_sync_check_valid(int32_t sync_obj)
  331. {
  332. struct sync_table_row *row = NULL;
  333. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  334. return -EINVAL;
  335. row = sync_dev->sync_table + sync_obj;
  336. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  337. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %d",
  338. sync_obj);
  339. return -EINVAL;
  340. }
  341. if (row->state == CAM_SYNC_STATE_INVALID) {
  342. CAM_ERR(CAM_SYNC,
  343. "Error: accessing an uninitialized sync obj = %d",
  344. sync_obj);
  345. return -EINVAL;
  346. }
  347. return 0;
  348. }
  349. int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
  350. {
  351. unsigned long timeleft;
  352. int rc = -EINVAL;
  353. struct sync_table_row *row = NULL;
  354. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  355. return -EINVAL;
  356. row = sync_dev->sync_table + sync_obj;
  357. if (row->state == CAM_SYNC_STATE_INVALID) {
  358. CAM_ERR(CAM_SYNC,
  359. "Error: accessing an uninitialized sync obj = %d",
  360. sync_obj);
  361. return -EINVAL;
  362. }
  363. timeleft = wait_for_completion_timeout(&row->signaled,
  364. msecs_to_jiffies(timeout_ms));
  365. if (!timeleft) {
  366. CAM_ERR(CAM_SYNC,
  367. "Error: timed out for sync obj = %d", sync_obj);
  368. rc = -ETIMEDOUT;
  369. } else {
  370. switch (row->state) {
  371. case CAM_SYNC_STATE_INVALID:
  372. case CAM_SYNC_STATE_ACTIVE:
  373. case CAM_SYNC_STATE_SIGNALED_ERROR:
  374. case CAM_SYNC_STATE_SIGNALED_CANCEL:
  375. CAM_ERR(CAM_SYNC,
  376. "Error: Wait on invalid state = %d, obj = %d",
  377. row->state, sync_obj);
  378. rc = -EINVAL;
  379. break;
  380. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  381. rc = 0;
  382. break;
  383. default:
  384. rc = -EINVAL;
  385. break;
  386. }
  387. }
  388. return rc;
  389. }
  390. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  391. {
  392. struct cam_sync_info sync_create;
  393. int result;
  394. if (k_ioctl->size != sizeof(struct cam_sync_info))
  395. return -EINVAL;
  396. if (!k_ioctl->ioctl_ptr)
  397. return -EINVAL;
  398. if (copy_from_user(&sync_create,
  399. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  400. k_ioctl->size))
  401. return -EFAULT;
  402. result = cam_sync_create(&sync_create.sync_obj,
  403. sync_create.name);
  404. if (!result)
  405. if (copy_to_user(
  406. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  407. &sync_create,
  408. k_ioctl->size))
  409. return -EFAULT;
  410. return result;
  411. }
/*
 * CAM_SYNC_SIGNAL ioctl handler: signal a sync object on behalf of
 * userspace with the state it supplies.
 *
 * Returns 0 on success, or the error from get-ref/signal;
 * -EINVAL/-EFAULT on argument or copy errors.
 */
static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	/* cam_sync_signal() drops the reference taken above. */
	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state);
}
  435. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  436. {
  437. struct cam_sync_merge sync_merge;
  438. uint32_t *sync_objs;
  439. uint32_t num_objs;
  440. uint32_t size;
  441. int result;
  442. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  443. return -EINVAL;
  444. if (!k_ioctl->ioctl_ptr)
  445. return -EINVAL;
  446. if (copy_from_user(&sync_merge,
  447. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  448. k_ioctl->size))
  449. return -EFAULT;
  450. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  451. return -EINVAL;
  452. size = sizeof(uint32_t) * sync_merge.num_objs;
  453. sync_objs = kzalloc(size, GFP_ATOMIC);
  454. if (!sync_objs)
  455. return -ENOMEM;
  456. if (copy_from_user(sync_objs,
  457. u64_to_user_ptr(sync_merge.sync_objs),
  458. sizeof(uint32_t) * sync_merge.num_objs)) {
  459. kfree(sync_objs);
  460. return -EFAULT;
  461. }
  462. num_objs = sync_merge.num_objs;
  463. result = cam_sync_merge(sync_objs,
  464. num_objs,
  465. &sync_merge.merged);
  466. if (!result)
  467. if (copy_to_user(
  468. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  469. &sync_merge,
  470. k_ioctl->size)) {
  471. kfree(sync_objs);
  472. return -EFAULT;
  473. }
  474. kfree(sync_objs);
  475. return result;
  476. }
  477. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  478. {
  479. struct cam_sync_wait sync_wait;
  480. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  481. return -EINVAL;
  482. if (!k_ioctl->ioctl_ptr)
  483. return -EINVAL;
  484. if (copy_from_user(&sync_wait,
  485. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  486. k_ioctl->size))
  487. return -EFAULT;
  488. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  489. sync_wait.timeout_ms);
  490. return 0;
  491. }
  492. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  493. {
  494. struct cam_sync_info sync_create;
  495. if (k_ioctl->size != sizeof(struct cam_sync_info))
  496. return -EINVAL;
  497. if (!k_ioctl->ioctl_ptr)
  498. return -EINVAL;
  499. if (copy_from_user(&sync_create,
  500. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  501. k_ioctl->size))
  502. return -EFAULT;
  503. return cam_sync_destroy(sync_create.sync_obj);
  504. }
/*
 * CAM_SYNC_REGISTER_PAYLOAD ioctl handler: attach a user payload to a
 * sync object. If the object has already signaled, the payload is sent
 * back immediately via a v4l2 event; otherwise it is queued on the
 * row's user_payload_list. Duplicate payloads (matching first two
 * words) return -EALREADY.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* GFP_KERNEL is fine here: the row lock is not held yet. */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	/*
	 * NOTE(review): the copy uses CAM_SYNC_PAYLOAD_WORDS while the
	 * already-signaled path below sends CAM_SYNC_USER_PAYLOAD_SIZE
	 * words — confirm these constants agree.
	 */
	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the payload immediately and return. */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {

		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject a payload already queued (first two words match). */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * CAM_SYNC_DEREGISTER_PAYLOAD ioctl handler: remove queued user
 * payloads whose first two words match the supplied payload.
 * Returns 0 even if nothing matched.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* Match on the same two-word key the register path de-dupes on. */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
				userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
				userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * v4l2 private-ioctl dispatcher: copies the ioctl arg to a local,
 * routes by id, and (for WAIT) writes the result back into the
 * caller's arg.
 */
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	/* Work on a copy; only WAIT propagates anything back. */
	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
  660. static unsigned int cam_sync_poll(struct file *f,
  661. struct poll_table_struct *pll_table)
  662. {
  663. int rc = 0;
  664. struct v4l2_fh *eventq = f->private_data;
  665. if (!eventq)
  666. return -EINVAL;
  667. poll_wait(f, &eventq->wait, pll_table);
  668. if (v4l2_event_pending(eventq))
  669. rc = POLLPRI;
  670. return rc;
  671. }
/*
 * open() handler: enforces a single opener (-EALREADY on the second
 * open) and publishes the v4l2 file handle as the device's event
 * queue under cam_sync_eventq_lock.
 */
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
/*
 * release() handler. On the last close, all leftover sync objects are
 * cleaned up in two phases: (1) signal every ACTIVE object as ERROR,
 * (2) flush the workqueue so signal callbacks finish, then destroy
 * every non-INVALID row. Errors in either phase are logged but do not
 * abort the cleanup.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Phase 1: fail any fence still in flight. Row 0 is reserved. */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);

	/* Detach the event queue so no further events are delivered. */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
  755. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  756. struct v4l2_event *new)
  757. {
  758. struct cam_sync_ev_header *ev_header;
  759. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  760. CAM_ERR(CAM_CRM, "Failed to notify event id %d fence %d statue %d",
  761. old->id, ev_header->sync_obj, ev_header->status);
  762. }
/* Subscribed-event ops: only the overflow/merge hook is provided. */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
/* Subscribe @fh to sync events with a bounded per-fh event queue. */
int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}
/* Unsubscribe @fh from sync events. */
int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/* v4l2 ioctl table: event (un)subscribe plus the private-ioctl entry. */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
/* File operations for the sync video device node. */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open  = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl   = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
  792. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  793. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  794. struct platform_device *pdev)
  795. {
  796. int rc;
  797. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  798. GFP_KERNEL);
  799. if (!sync_dev->v4l2_dev.mdev)
  800. return -ENOMEM;
  801. media_device_init(sync_dev->v4l2_dev.mdev);
  802. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  803. sizeof(sync_dev->v4l2_dev.mdev->model));
  804. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  805. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  806. if (rc < 0)
  807. goto register_fail;
  808. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  809. if (rc < 0)
  810. goto entity_fail;
  811. return 0;
  812. entity_fail:
  813. media_device_unregister(sync_dev->v4l2_dev.mdev);
  814. register_fail:
  815. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  816. return rc;
  817. }
/*
 * Tear down everything cam_sync_media_controller_init() set up, in
 * reverse order, and free the media_device allocation.
 */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}
  825. static void cam_sync_init_entity(struct sync_device *sync_dev)
  826. {
  827. sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
  828. sync_dev->vdev->entity.name =
  829. video_device_node_name(sync_dev->vdev);
  830. }
#else
/* Media-controller support compiled out: all hooks become no-ops. */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
  844. static int cam_sync_create_debugfs(void)
  845. {
  846. sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
  847. if (!sync_dev->dentry) {
  848. CAM_ERR(CAM_SYNC, "Failed to create sync dir");
  849. return -ENOMEM;
  850. }
  851. if (!debugfs_create_bool("trigger_cb_without_switch",
  852. 0644, sync_dev->dentry,
  853. &trigger_cb_without_switch)) {
  854. CAM_ERR(CAM_SYNC,
  855. "failed to create trigger_cb_without_switch entry");
  856. return -ENOMEM;
  857. }
  858. return 0;
  859. }
  860. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  861. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  862. {
  863. int rc = 0;
  864. uint32_t sync_status = synx_status;
  865. switch (synx_status) {
  866. case SYNX_STATE_ACTIVE:
  867. sync_status = CAM_SYNC_STATE_ACTIVE;
  868. break;
  869. case SYNX_STATE_SIGNALED_SUCCESS:
  870. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  871. break;
  872. case SYNX_STATE_SIGNALED_ERROR:
  873. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  874. break;
  875. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  876. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  877. break;
  878. default:
  879. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  880. synx_status, sync_obj);
  881. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  882. break;
  883. }
  884. rc = cam_sync_signal(sync_obj, sync_status);
  885. if (rc) {
  886. CAM_ERR(CAM_SYNC,
  887. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  888. sync_obj, synx_status, sync_status, rc);
  889. }
  890. return rc;
  891. }
  892. static int cam_sync_register_synx_bind_ops(
  893. struct synx_register_params *object)
  894. {
  895. int rc = 0;
  896. rc = synx_register_ops(object);
  897. if (rc)
  898. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  899. return rc;
  900. }
  901. static void cam_sync_unregister_synx_bind_ops(
  902. struct synx_register_params *object)
  903. {
  904. int rc = 0;
  905. rc = synx_deregister_ops(object);
  906. if (rc)
  907. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  908. }
  909. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  910. {
  911. struct synx_register_params *params = object;
  912. params->name = CAM_SYNC_NAME;
  913. params->type = SYNX_TYPE_CSL;
  914. params->ops.register_callback = cam_sync_register_callback;
  915. params->ops.deregister_callback = cam_sync_deregister_callback;
  916. params->ops.enable_signaling = cam_sync_get_obj_ref;
  917. params->ops.signal = cam_synx_sync_signal;
  918. }
  919. #endif
  920. static int cam_sync_component_bind(struct device *dev,
  921. struct device *master_dev, void *data)
  922. {
  923. int rc;
  924. int idx;
  925. struct platform_device *pdev = to_platform_device(dev);
  926. sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
  927. if (!sync_dev)
  928. return -ENOMEM;
  929. mutex_init(&sync_dev->table_lock);
  930. spin_lock_init(&sync_dev->cam_sync_eventq_lock);
  931. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
  932. spin_lock_init(&sync_dev->row_spinlocks[idx]);
  933. sync_dev->vdev = video_device_alloc();
  934. if (!sync_dev->vdev) {
  935. rc = -ENOMEM;
  936. goto vdev_fail;
  937. }
  938. rc = cam_sync_media_controller_init(sync_dev, pdev);
  939. if (rc < 0)
  940. goto mcinit_fail;
  941. sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
  942. rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
  943. if (rc < 0)
  944. goto register_fail;
  945. strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
  946. sizeof(sync_dev->vdev->name));
  947. sync_dev->vdev->release = video_device_release_empty;
  948. sync_dev->vdev->fops = &cam_sync_v4l2_fops;
  949. sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
  950. sync_dev->vdev->minor = -1;
  951. sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
  952. sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;
  953. rc = video_register_device(sync_dev->vdev,
  954. VFL_TYPE_GRABBER, -1);
  955. if (rc < 0) {
  956. CAM_ERR(CAM_SYNC,
  957. "video device registration failure rc = %d, name = %s, device_caps = %d",
  958. rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
  959. goto v4l2_fail;
  960. }
  961. cam_sync_init_entity(sync_dev);
  962. video_set_drvdata(sync_dev->vdev, sync_dev);
  963. memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
  964. memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
  965. bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  966. /*
  967. * We treat zero as invalid handle, so we will keep the 0th bit set
  968. * always
  969. */
  970. set_bit(0, sync_dev->bitmap);
  971. sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
  972. WQ_HIGHPRI | WQ_UNBOUND, 1);
  973. if (!sync_dev->work_queue) {
  974. CAM_ERR(CAM_SYNC,
  975. "Error: high priority work queue creation failed");
  976. rc = -ENOMEM;
  977. goto v4l2_fail;
  978. }
  979. trigger_cb_without_switch = false;
  980. cam_sync_create_debugfs();
  981. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  982. CAM_DBG(CAM_SYNC, "Registering with synx driver");
  983. cam_sync_configure_synx_obj(&sync_dev->params);
  984. rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
  985. if (rc)
  986. goto v4l2_fail;
  987. #endif
  988. CAM_DBG(CAM_SYNC, "Component bound successfully");
  989. return rc;
  990. v4l2_fail:
  991. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  992. register_fail:
  993. cam_sync_media_controller_cleanup(sync_dev);
  994. mcinit_fail:
  995. video_unregister_device(sync_dev->vdev);
  996. video_device_release(sync_dev->vdev);
  997. vdev_fail:
  998. mutex_destroy(&sync_dev->table_lock);
  999. kfree(sync_dev);
  1000. return rc;
  1001. }
  1002. static void cam_sync_component_unbind(struct device *dev,
  1003. struct device *master_dev, void *data)
  1004. {
  1005. int i;
  1006. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  1007. cam_sync_media_controller_cleanup(sync_dev);
  1008. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  1009. cam_sync_unregister_synx_bind_ops(&sync_dev->params);
  1010. #endif
  1011. video_unregister_device(sync_dev->vdev);
  1012. video_device_release(sync_dev->vdev);
  1013. debugfs_remove_recursive(sync_dev->dentry);
  1014. sync_dev->dentry = NULL;
  1015. for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
  1016. spin_lock_init(&sync_dev->row_spinlocks[i]);
  1017. kfree(sync_dev);
  1018. sync_dev = NULL;
  1019. }
  1020. const static struct component_ops cam_sync_component_ops = {
  1021. .bind = cam_sync_component_bind,
  1022. .unbind = cam_sync_component_unbind,
  1023. };
  1024. static int cam_sync_probe(struct platform_device *pdev)
  1025. {
  1026. int rc = 0;
  1027. CAM_DBG(CAM_SYNC, "Adding Sync component");
  1028. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  1029. if (rc)
  1030. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  1031. return rc;
  1032. }
/*
 * Platform remove: detach from the component framework; teardown
 * happens in cam_sync_component_unbind().
 */
static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/* Platform driver glue; registered from cam_sync_init(). */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		/*
		 * NOTE(review): platform_driver_register() sets .owner
		 * itself, so this assignment is redundant but harmless.
		 */
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};
  1053. int cam_sync_init(void)
  1054. {
  1055. return platform_driver_register(&cam_sync_driver);
  1056. }
/* Driver exit point: unregister the cam_sync platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
  1061. MODULE_DESCRIPTION("Camera sync driver");
  1062. MODULE_LICENSE("GPL v2");