  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "cam_compat.h"
  18. #include "camera_main.h"
  19. #include "cam_req_mgr_workq.h"
  20. struct sync_device *sync_dev;
  21. /*
  22. * Flag to determine whether to enqueue cb of a
  23. * signaled fence onto the workq or invoke it
  24. * directly in the same context
  25. */
  26. static bool trigger_cb_without_switch;
  27. static void cam_sync_print_fence_table(void)
  28. {
  29. int idx;
  30. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  31. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  32. CAM_INFO(CAM_SYNC,
  33. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  34. idx,
  35. sync_dev->sync_table[idx].sync_id,
  36. sync_dev->sync_table[idx].name,
  37. sync_dev->sync_table[idx].type,
  38. sync_dev->sync_table[idx].state,
  39. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  40. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  41. }
  42. }
/*
 * Allocate a free slot in the global sync table and initialize it as an
 * individual (non-group) sync object named @name. On success the slot
 * index is returned through @sync_obj.
 *
 * Returns 0 on success, -ENOMEM when the table is full, -EINVAL if the
 * row could not be initialized.
 */
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	/*
	 * find_first_zero_bit() + test_and_set_bit() are not atomic as a
	 * pair, so loop until this thread actually wins a free slot.
	 */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %d sync name = %s reached max!",
				idx, name);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		/* Give the slot back so it can be reused. */
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %s[%i]", name, *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	return rc;
}
/*
 * Register @cb_func/@userdata to be invoked when @sync_obj signals.
 *
 * If the object is already signaled (and, for groups, no children
 * remain pending), the callback is dispatched immediately: either
 * inline in this context (trigger_cb_without_switch) or via the
 * driver workqueue. Otherwise the callback is queued on the row's
 * callback_list for dispatch at signal time.
 *
 * Returns 0 on success, -EINVAL on a bad handle/callback or an
 * uninitialized row, -ENOMEM on allocation failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* GFP_ATOMIC: allocation happens under a bh-disabled spinlock. */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
				row->name,
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			/*
			 * Drop the lock before calling out: the callback may
			 * re-enter the sync framework and take row locks.
			 */
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			/* Snapshot state now; the row may be reused later. */
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]",
				row->name,
				sync_cb->sync_obj);
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	/* Not yet signaled: park the callback on the row for later. */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
  136. int cam_sync_deregister_callback(sync_callback cb_func,
  137. void *userdata, int32_t sync_obj)
  138. {
  139. struct sync_table_row *row = NULL;
  140. struct sync_callback_info *sync_cb, *temp;
  141. bool found = false;
  142. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  143. return -EINVAL;
  144. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  145. row = sync_dev->sync_table + sync_obj;
  146. if (row->state == CAM_SYNC_STATE_INVALID) {
  147. CAM_ERR(CAM_SYNC,
  148. "Error: accessing an uninitialized sync obj = %s[%d]",
  149. row->name,
  150. sync_obj);
  151. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  152. return -EINVAL;
  153. }
  154. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
  155. row->name,
  156. sync_obj);
  157. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  158. if (sync_cb->callback_func == cb_func &&
  159. sync_cb->cb_data == userdata) {
  160. list_del_init(&sync_cb->list);
  161. kfree(sync_cb);
  162. found = true;
  163. }
  164. }
  165. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  166. return found ? 0 : -ENOENT;
  167. }
/*
 * Signal an individual sync object with a terminal @status and
 * propagate the signal to any merged (group) parents.
 *
 * Only CAM_SYNC_STATE_SIGNALED_{SUCCESS,ERROR,CANCEL} are accepted.
 * The actual state transition happens only when the last reference is
 * dropped (atomic_dec_and_test); earlier calls just release a ref.
 *
 * Returns 0 on success; -EINVAL for bad handle/state/status or a
 * GROUP object (groups are signaled only via their children);
 * -EALREADY if the object is no longer ACTIVE.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %s[%d]",
			row->name,
			sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d event reason = %u",
			status, event_cause);
		return -EINVAL;
	}

	/* Only the final reference holder performs the state change. */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		/* Per-parent lock: child lock is already released above. */
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* Parent signals once all of its children have signaled. */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
/*
 * Merge @num_objs individual sync objects into a new GROUP object that
 * signals once all children have signaled. The new handle is returned
 * through @merged_obj.
 *
 * The input list must contain at least two unique, valid objects.
 * Returns 0 on success, -EINVAL on bad input, -ENOMEM when the table
 * is full, or the error from validating a child object.
 */
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	/* NOTE: this also compacts @sync_obj in place if duplicates exist. */
	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	/* Race for a free table slot; see cam_sync_create(). */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
  308. int cam_sync_get_obj_ref(int32_t sync_obj)
  309. {
  310. struct sync_table_row *row = NULL;
  311. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  312. return -EINVAL;
  313. row = sync_dev->sync_table + sync_obj;
  314. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  315. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  316. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  317. CAM_ERR(CAM_SYNC,
  318. "Error: accessing an uninitialized sync obj = %s[%d]",
  319. row->name,
  320. sync_obj);
  321. return -EINVAL;
  322. }
  323. atomic_inc(&row->ref_cnt);
  324. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  325. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  326. return 0;
  327. }
  328. int cam_sync_put_obj_ref(int32_t sync_obj)
  329. {
  330. struct sync_table_row *row = NULL;
  331. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  332. return -EINVAL;
  333. row = sync_dev->sync_table + sync_obj;
  334. atomic_dec(&row->ref_cnt);
  335. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  336. return 0;
  337. }
/*
 * Destroy @sync_obj. Handle validation and full row teardown are
 * delegated to cam_sync_deinit_object(); its return code is passed
 * straight through.
 */
int cam_sync_destroy(int32_t sync_obj)
{
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
  342. int cam_sync_check_valid(int32_t sync_obj)
  343. {
  344. struct sync_table_row *row = NULL;
  345. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  346. return -EINVAL;
  347. row = sync_dev->sync_table + sync_obj;
  348. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  349. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
  350. row->name,
  351. sync_obj);
  352. return -EINVAL;
  353. }
  354. if (row->state == CAM_SYNC_STATE_INVALID) {
  355. CAM_ERR(CAM_SYNC,
  356. "Error: accessing an uninitialized sync obj = %s[%d]",
  357. row->name,
  358. sync_obj);
  359. return -EINVAL;
  360. }
  361. return 0;
  362. }
/*
 * Block until @sync_obj signals or @timeout_ms elapses.
 *
 * Returns 0 only when the object signaled SUCCESS; -ETIMEDOUT on
 * timeout; -EINVAL for a bad handle, an uninitialized row, or a
 * terminal state other than SUCCESS (ERROR/CANCEL/still ACTIVE).
 *
 * NOTE(review): row->state is read here without taking the row
 * spinlock; presumably the completion provides sufficient ordering —
 * confirm against the signal path.
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));

	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
		rc = -ETIMEDOUT;
	} else {
		/* Only a clean SUCCESS signal counts as a successful wait. */
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d, name = %s",
				row->state, sync_obj, row->name);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
  405. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  406. {
  407. struct cam_sync_info sync_create;
  408. int result;
  409. if (k_ioctl->size != sizeof(struct cam_sync_info))
  410. return -EINVAL;
  411. if (!k_ioctl->ioctl_ptr)
  412. return -EINVAL;
  413. if (copy_from_user(&sync_create,
  414. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  415. k_ioctl->size))
  416. return -EFAULT;
  417. result = cam_sync_create(&sync_create.sync_obj,
  418. sync_create.name);
  419. if (!result)
  420. if (copy_to_user(
  421. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  422. &sync_create,
  423. k_ioctl->size))
  424. return -EFAULT;
  425. return result;
  426. }
/*
 * Ioctl handler: signal a sync object on behalf of userspace.
 *
 * A reference is taken first so the UMD-signaled fence follows the
 * same ref-counted signal path as kernel-signaled fences; the signal
 * path consumes this reference via atomic_dec_and_test().
 *
 * NOTE(review): if cam_sync_signal() fails after the ref was taken
 * (e.g. -EALREADY), that reference does not appear to be released
 * here — verify whether this is intentional or a leak.
 */
static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state,
		CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}
  451. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  452. {
  453. struct cam_sync_merge sync_merge;
  454. uint32_t *sync_objs;
  455. uint32_t num_objs;
  456. uint32_t size;
  457. int result;
  458. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  459. return -EINVAL;
  460. if (!k_ioctl->ioctl_ptr)
  461. return -EINVAL;
  462. if (copy_from_user(&sync_merge,
  463. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  464. k_ioctl->size))
  465. return -EFAULT;
  466. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  467. return -EINVAL;
  468. size = sizeof(uint32_t) * sync_merge.num_objs;
  469. sync_objs = kzalloc(size, GFP_ATOMIC);
  470. if (!sync_objs)
  471. return -ENOMEM;
  472. if (copy_from_user(sync_objs,
  473. u64_to_user_ptr(sync_merge.sync_objs),
  474. sizeof(uint32_t) * sync_merge.num_objs)) {
  475. kfree(sync_objs);
  476. return -EFAULT;
  477. }
  478. num_objs = sync_merge.num_objs;
  479. result = cam_sync_merge(sync_objs,
  480. num_objs,
  481. &sync_merge.merged);
  482. if (!result)
  483. if (copy_to_user(
  484. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  485. &sync_merge,
  486. k_ioctl->size)) {
  487. kfree(sync_objs);
  488. return -EFAULT;
  489. }
  490. kfree(sync_objs);
  491. return result;
  492. }
  493. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  494. {
  495. struct cam_sync_wait sync_wait;
  496. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  497. return -EINVAL;
  498. if (!k_ioctl->ioctl_ptr)
  499. return -EINVAL;
  500. if (copy_from_user(&sync_wait,
  501. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  502. k_ioctl->size))
  503. return -EFAULT;
  504. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  505. sync_wait.timeout_ms);
  506. return 0;
  507. }
  508. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  509. {
  510. struct cam_sync_info sync_create;
  511. if (k_ioctl->size != sizeof(struct cam_sync_info))
  512. return -EINVAL;
  513. if (!k_ioctl->ioctl_ptr)
  514. return -EINVAL;
  515. if (copy_from_user(&sync_create,
  516. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  517. k_ioctl->size))
  518. return -EFAULT;
  519. return cam_sync_destroy(sync_create.sync_obj);
  520. }
/*
 * Ioctl handler: attach a userspace payload to a sync object so it is
 * delivered via a v4l2 event when the object signals.
 *
 * If the object is already signaled the event is sent immediately and
 * nothing is queued. Duplicate payloads (matched on the first two
 * payload words) are rejected with -EALREADY.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* Allocate before taking the lock; GFP_KERNEL may sleep. */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the event now instead of queueing. */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {

		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject a payload identical to one already queued on this row. */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * Ioctl handler: remove a previously registered userspace payload
 * from a sync object. Payloads are matched on their first two words;
 * all matches are removed. Removing a payload that was never
 * registered is not an error.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
				userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
				userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
  633. static long cam_sync_dev_ioctl(struct file *filep, void *fh,
  634. bool valid_prio, unsigned int cmd, void *arg)
  635. {
  636. int32_t rc;
  637. struct sync_device *sync_dev = video_drvdata(filep);
  638. struct cam_private_ioctl_arg k_ioctl;
  639. if (!sync_dev) {
  640. CAM_ERR(CAM_SYNC, "sync_dev NULL");
  641. return -EINVAL;
  642. }
  643. if (!arg)
  644. return -EINVAL;
  645. if (cmd != CAM_PRIVATE_IOCTL_CMD)
  646. return -ENOIOCTLCMD;
  647. k_ioctl = *(struct cam_private_ioctl_arg *)arg;
  648. switch (k_ioctl.id) {
  649. case CAM_SYNC_CREATE:
  650. rc = cam_sync_handle_create(&k_ioctl);
  651. break;
  652. case CAM_SYNC_DESTROY:
  653. rc = cam_sync_handle_destroy(&k_ioctl);
  654. break;
  655. case CAM_SYNC_REGISTER_PAYLOAD:
  656. rc = cam_sync_handle_register_user_payload(
  657. &k_ioctl);
  658. break;
  659. case CAM_SYNC_DEREGISTER_PAYLOAD:
  660. rc = cam_sync_handle_deregister_user_payload(
  661. &k_ioctl);
  662. break;
  663. case CAM_SYNC_SIGNAL:
  664. rc = cam_sync_handle_signal(&k_ioctl);
  665. break;
  666. case CAM_SYNC_MERGE:
  667. rc = cam_sync_handle_merge(&k_ioctl);
  668. break;
  669. case CAM_SYNC_WAIT:
  670. rc = cam_sync_handle_wait(&k_ioctl);
  671. ((struct cam_private_ioctl_arg *)arg)->result =
  672. k_ioctl.result;
  673. break;
  674. default:
  675. rc = -ENOIOCTLCMD;
  676. }
  677. return rc;
  678. }
  679. static unsigned int cam_sync_poll(struct file *f,
  680. struct poll_table_struct *pll_table)
  681. {
  682. int rc = 0;
  683. struct v4l2_fh *eventq = f->private_data;
  684. if (!eventq)
  685. return -EINVAL;
  686. poll_wait(f, &eventq->wait, pll_table);
  687. if (v4l2_event_pending(eventq))
  688. rc = POLLPRI;
  689. return rc;
  690. }
  691. static int cam_sync_open(struct file *filep)
  692. {
  693. int rc;
  694. struct sync_device *sync_dev = video_drvdata(filep);
  695. if (!sync_dev) {
  696. CAM_ERR(CAM_SYNC, "Sync device NULL");
  697. return -ENODEV;
  698. }
  699. mutex_lock(&sync_dev->table_lock);
  700. if (sync_dev->open_cnt >= 1) {
  701. mutex_unlock(&sync_dev->table_lock);
  702. return -EALREADY;
  703. }
  704. rc = v4l2_fh_open(filep);
  705. if (!rc) {
  706. sync_dev->open_cnt++;
  707. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  708. sync_dev->cam_sync_eventq = filep->private_data;
  709. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  710. } else {
  711. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  712. }
  713. mutex_unlock(&sync_dev->table_lock);
  714. return rc;
  715. }
/*
 * Device-node release handler. On the last close, every remaining
 * ACTIVE sync object is force-signaled with ERROR, the workqueue is
 * flushed so pending signal callbacks finish, and then all
 * still-valid objects are destroyed. Finally the global event queue
 * pointer is cleared and the v4l2 file handle released.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Slot 0 is never allocated; start cleanup at 1. */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}
		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);
		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	/* Drop the event queue so no further events are delivered. */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);
	return rc;
}
  775. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  776. struct v4l2_event *new)
  777. {
  778. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  779. struct cam_sync_ev_header_v2 *ev_header;
  780. ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
  781. CAM_ERR(CAM_CRM,
  782. "Failed to notify event id %d fence %d statue %d reason %u %u %u %u",
  783. old->id, ev_header->sync_obj, ev_header->status,
  784. ev_header->evt_param[0], ev_header->evt_param[1],
  785. ev_header->evt_param[2], ev_header->evt_param[3]);
  786. } else {
  787. struct cam_sync_ev_header *ev_header;
  788. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  789. CAM_ERR(CAM_CRM,
  790. "Failed to notify event id %d fence %d statue %d",
  791. old->id, ev_header->sync_obj, ev_header->status);
  792. }
  793. }
/* Event-queue ops: log (rather than silently drop) overflowed events. */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
  797. int cam_sync_subscribe_event(struct v4l2_fh *fh,
  798. const struct v4l2_event_subscription *sub)
  799. {
  800. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  801. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  802. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  803. return -EINVAL;
  804. }
  805. sync_dev->version = sub->type;
  806. CAM_DBG(CAM_SYNC, "Sync event verion type 0x%x", sync_dev->version);
  807. return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
  808. &cam_sync_v4l2_ops);
  809. }
  810. int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
  811. const struct v4l2_event_subscription *sub)
  812. {
  813. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  814. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  815. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  816. return -EINVAL;
  817. }
  818. return v4l2_event_unsubscribe(fh, sub);
  819. }
/*
 * V4L2 ioctl dispatch table: event (un)subscription plus the
 * driver-private default handler that services sync commands.
 */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
  825. static struct v4l2_file_operations cam_sync_v4l2_fops = {
  826. .owner = THIS_MODULE,
  827. .open = cam_sync_open,
  828. .release = cam_sync_close,
  829. .poll = cam_sync_poll,
  830. .unlocked_ioctl = video_ioctl2,
  831. #ifdef CONFIG_COMPAT
  832. .compat_ioctl32 = video_ioctl2,
  833. #endif
  834. };
  835. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  836. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  837. struct platform_device *pdev)
  838. {
  839. int rc;
  840. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  841. GFP_KERNEL);
  842. if (!sync_dev->v4l2_dev.mdev)
  843. return -ENOMEM;
  844. media_device_init(sync_dev->v4l2_dev.mdev);
  845. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  846. sizeof(sync_dev->v4l2_dev.mdev->model));
  847. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  848. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  849. if (rc < 0)
  850. goto register_fail;
  851. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  852. if (rc < 0)
  853. goto entity_fail;
  854. return 0;
  855. entity_fail:
  856. media_device_unregister(sync_dev->v4l2_dev.mdev);
  857. register_fail:
  858. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  859. return rc;
  860. }
/*
 * Tear down the media-controller state created by
 * cam_sync_media_controller_init(): entity, registration, device, and the
 * media_device allocation itself.
 * NOTE(review): v4l2_dev.mdev is freed but not NULLed; callers free
 * sync_dev shortly afterwards, so this appears harmless — confirm no
 * later use of the stale pointer.
 */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}
/*
 * Tag the video device's media entity with the sync device type and the
 * device-node name (must run after video_register_device so the node name
 * is valid).
 */
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
  874. #else
/*
 * Media-controller support compiled out: no-op stand-ins so the
 * bind/unbind paths compile unchanged.
 */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
  886. #endif
  887. static int cam_sync_create_debugfs(void)
  888. {
  889. int rc = 0;
  890. struct dentry *dbgfileptr = NULL;
  891. dbgfileptr = debugfs_create_dir("camera_sync", NULL);
  892. if (!dbgfileptr) {
  893. CAM_ERR(CAM_SYNC,"DebugFS could not create directory!");
  894. rc = -ENOENT;
  895. goto end;
  896. }
  897. /* Store parent inode for cleanup in caller */
  898. sync_dev->dentry = dbgfileptr;
  899. dbgfileptr = debugfs_create_bool("trigger_cb_without_switch", 0644,
  900. sync_dev->dentry, &trigger_cb_without_switch);
  901. if (IS_ERR(dbgfileptr)) {
  902. if (PTR_ERR(dbgfileptr) == -ENODEV)
  903. CAM_WARN(CAM_SYNC, "DebugFS not enabled in kernel!");
  904. else
  905. rc = PTR_ERR(dbgfileptr);
  906. }
  907. end:
  908. return rc;
  909. }
  910. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  911. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  912. {
  913. int rc = 0;
  914. uint32_t sync_status = synx_status;
  915. switch (synx_status) {
  916. case SYNX_STATE_ACTIVE:
  917. sync_status = CAM_SYNC_STATE_ACTIVE;
  918. break;
  919. case SYNX_STATE_SIGNALED_SUCCESS:
  920. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  921. break;
  922. case SYNX_STATE_SIGNALED_ERROR:
  923. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  924. break;
  925. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  926. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  927. break;
  928. default:
  929. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  930. synx_status, sync_obj);
  931. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  932. break;
  933. }
  934. rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
  935. if (rc) {
  936. CAM_ERR(CAM_SYNC,
  937. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  938. sync_obj, synx_status, sync_status, rc);
  939. }
  940. return rc;
  941. }
  942. static int cam_sync_register_synx_bind_ops(
  943. struct synx_register_params *object)
  944. {
  945. int rc = 0;
  946. rc = synx_register_ops(object);
  947. if (rc)
  948. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  949. return rc;
  950. }
  951. static void cam_sync_unregister_synx_bind_ops(
  952. struct synx_register_params *object)
  953. {
  954. int rc = 0;
  955. rc = synx_deregister_ops(object);
  956. if (rc)
  957. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  958. }
  959. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  960. {
  961. struct synx_register_params *params = object;
  962. params->name = CAM_SYNC_NAME;
  963. params->type = SYNX_TYPE_CSL;
  964. params->ops.register_callback = cam_sync_register_callback;
  965. params->ops.deregister_callback = cam_sync_deregister_callback;
  966. params->ops.enable_signaling = cam_sync_get_obj_ref;
  967. params->ops.signal = cam_synx_sync_signal;
  968. }
  969. #endif
/*
 * Component bind: allocate the global sync_dev, set up locks, the video
 * device, media controller, V4L2 registration, sync table bookkeeping,
 * the high-priority workqueue, debugfs, and (optionally) synx bind ops.
 *
 * Error handling uses a fall-through label chain; each label undoes the
 * steps completed before its goto.  NOTE(review): the mcinit_fail path
 * calls video_unregister_device() on a device that was never registered —
 * presumably relying on the V4L2 core treating that as a no-op; confirm
 * against the kernel version in use.
 *
 * Returns 0 on success or a negative errno.
 */
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	/* One spinlock per sync-table row for fine-grained locking. */
	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;

	rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	/* Entity naming needs the registered device node name. */
	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	/* debugfs is best-effort; return value intentionally ignored. */
	cam_sync_create_debugfs();
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	if (rc)
		goto v4l2_fail;
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
/*
 * Component unbind: tear down everything created in bind — V4L2 device,
 * media controller, synx bind ops, video device, debugfs — then free the
 * global sync_dev.
 * NOTE(review): re-initializing the row spinlocks immediately before
 * kfree() has no visible effect; presumably historical, candidate for
 * removal.
 */
static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	debugfs_remove_recursive(sync_dev->dentry);
	sync_dev->dentry = NULL;

	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	kfree(sync_dev);
	sync_dev = NULL;
}
  1069. const static struct component_ops cam_sync_component_ops = {
  1070. .bind = cam_sync_component_bind,
  1071. .unbind = cam_sync_component_unbind,
  1072. };
  1073. static int cam_sync_probe(struct platform_device *pdev)
  1074. {
  1075. int rc = 0;
  1076. CAM_DBG(CAM_SYNC, "Adding Sync component");
  1077. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  1078. if (rc)
  1079. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  1080. return rc;
  1081. }
/* Platform remove: detach the component registered in probe. */
static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}
/* Devicetree match table: binds this driver to "qcom,cam-sync" nodes. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver definition.  suppress_bind_attrs blocks manual
 * bind/unbind via sysfs.
 * NOTE(review): .owner is presumably redundant — platform_driver_register
 * sets it via THIS_MODULE on modern kernels; harmless either way.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module-level entry: register the sync platform driver. */
int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

/* Module-level exit: unregister the sync platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
  1110. MODULE_DESCRIPTION("Camera sync driver");
  1111. MODULE_LICENSE("GPL v2");