cam_sync.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #include "cam_sync_util.h"
  12. #include "cam_debug_util.h"
  13. #include "cam_common_util.h"
  14. #include "camera_main.h"
  15. #ifdef CONFIG_MSM_GLOBAL_SYNX
  16. #include <synx_api.h>
  17. #endif
/* Global driver state; allocated once at component bind. */
struct sync_device *sync_dev;
/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
  25. static void cam_sync_print_fence_table(void)
  26. {
  27. int idx;
  28. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  29. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  30. CAM_INFO(CAM_SYNC,
  31. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  32. idx,
  33. sync_dev->sync_table[idx].sync_id,
  34. sync_dev->sync_table[idx].name,
  35. sync_dev->sync_table[idx].type,
  36. sync_dev->sync_table[idx].state,
  37. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  38. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  39. }
  40. }
  41. int cam_sync_create(int32_t *sync_obj, const char *name)
  42. {
  43. int rc;
  44. long idx;
  45. bool bit;
  46. do {
  47. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  48. if (idx >= CAM_SYNC_MAX_OBJS) {
  49. CAM_ERR(CAM_SYNC,
  50. "Error: Unable to create sync idx = %d reached max!",
  51. idx);
  52. cam_sync_print_fence_table();
  53. return -ENOMEM;
  54. }
  55. CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
  56. bit = test_and_set_bit(idx, sync_dev->bitmap);
  57. } while (bit);
  58. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  59. rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
  60. CAM_SYNC_TYPE_INDV);
  61. if (rc) {
  62. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  63. idx);
  64. clear_bit(idx, sync_dev->bitmap);
  65. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  66. return -EINVAL;
  67. }
  68. *sync_obj = idx;
  69. CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
  70. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  71. return rc;
  72. }
  73. int cam_sync_register_callback(sync_callback cb_func,
  74. void *userdata, int32_t sync_obj)
  75. {
  76. struct sync_callback_info *sync_cb;
  77. struct sync_table_row *row = NULL;
  78. int status = 0;
  79. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
  80. return -EINVAL;
  81. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  82. row = sync_dev->sync_table + sync_obj;
  83. if (row->state == CAM_SYNC_STATE_INVALID) {
  84. CAM_ERR(CAM_SYNC,
  85. "Error: accessing an uninitialized sync obj %d",
  86. sync_obj);
  87. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  88. return -EINVAL;
  89. }
  90. sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
  91. if (!sync_cb) {
  92. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  93. return -ENOMEM;
  94. }
  95. /* Trigger callback if sync object is already in SIGNALED state */
  96. if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
  97. row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
  98. (!row->remaining)) {
  99. if (trigger_cb_without_switch) {
  100. CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
  101. sync_obj);
  102. status = row->state;
  103. kfree(sync_cb);
  104. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  105. cb_func(sync_obj, status, userdata);
  106. } else {
  107. sync_cb->callback_func = cb_func;
  108. sync_cb->cb_data = userdata;
  109. sync_cb->sync_obj = sync_obj;
  110. INIT_WORK(&sync_cb->cb_dispatch_work,
  111. cam_sync_util_cb_dispatch);
  112. sync_cb->status = row->state;
  113. CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
  114. sync_cb->sync_obj);
  115. queue_work(sync_dev->work_queue,
  116. &sync_cb->cb_dispatch_work);
  117. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  118. }
  119. return 0;
  120. }
  121. sync_cb->callback_func = cb_func;
  122. sync_cb->cb_data = userdata;
  123. sync_cb->sync_obj = sync_obj;
  124. INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
  125. list_add_tail(&sync_cb->list, &row->callback_list);
  126. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  127. return 0;
  128. }
  129. int cam_sync_deregister_callback(sync_callback cb_func,
  130. void *userdata, int32_t sync_obj)
  131. {
  132. struct sync_table_row *row = NULL;
  133. struct sync_callback_info *sync_cb, *temp;
  134. bool found = false;
  135. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  136. return -EINVAL;
  137. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  138. row = sync_dev->sync_table + sync_obj;
  139. if (row->state == CAM_SYNC_STATE_INVALID) {
  140. CAM_ERR(CAM_SYNC,
  141. "Error: accessing an uninitialized sync obj = %d",
  142. sync_obj);
  143. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  144. return -EINVAL;
  145. }
  146. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
  147. sync_obj);
  148. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  149. if (sync_cb->callback_func == cb_func &&
  150. sync_cb->cb_data == userdata) {
  151. list_del_init(&sync_cb->list);
  152. kfree(sync_cb);
  153. found = true;
  154. }
  155. }
  156. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  157. return found ? 0 : -ENOENT;
  158. }
/*
 * Signal an individual sync object as SUCCESS or ERROR, dispatch its
 * registered callbacks/payloads, and propagate the signal to any merged
 * (GROUP) parents whose remaining-child count drops to zero.
 *
 * Returns 0 on success; -EINVAL for an out-of-range/uninitialized object,
 * a GROUP object, or an undefined status; -EALREADY if already signaled.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}
	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}
	/* GROUP objects are signaled implicitly via their children. */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}
	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}
	/*
	 * Only the final reference actually performs the signal; earlier
	 * callers just drop their ref (see cam_sync_get_obj_ref()).
	 */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}
	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);
	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (list_empty(&parents_list))
		return 0;
	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		/* Parent lock is taken only after the child lock was dropped. */
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;
		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}
		/* Last outstanding child: the parent signals now too. */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}
	return 0;
}
  245. int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
  246. {
  247. int rc;
  248. long idx = 0;
  249. bool bit;
  250. if (!sync_obj || !merged_obj) {
  251. CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
  252. return -EINVAL;
  253. }
  254. if (num_objs <= 1) {
  255. CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
  256. return -EINVAL;
  257. }
  258. if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
  259. != num_objs) {
  260. CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
  261. return -EINVAL;
  262. }
  263. do {
  264. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  265. if (idx >= CAM_SYNC_MAX_OBJS)
  266. return -ENOMEM;
  267. bit = test_and_set_bit(idx, sync_dev->bitmap);
  268. } while (bit);
  269. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  270. rc = cam_sync_init_group_object(sync_dev->sync_table,
  271. idx, sync_obj,
  272. num_objs);
  273. if (rc < 0) {
  274. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  275. idx);
  276. clear_bit(idx, sync_dev->bitmap);
  277. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  278. return -EINVAL;
  279. }
  280. CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
  281. *merged_obj = idx;
  282. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  283. return 0;
  284. }
  285. int cam_sync_get_obj_ref(int32_t sync_obj)
  286. {
  287. struct sync_table_row *row = NULL;
  288. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  289. return -EINVAL;
  290. row = sync_dev->sync_table + sync_obj;
  291. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  292. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  293. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  294. CAM_ERR(CAM_SYNC,
  295. "Error: accessing an uninitialized sync obj = %d",
  296. sync_obj);
  297. return -EINVAL;
  298. }
  299. atomic_inc(&row->ref_cnt);
  300. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  301. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  302. return 0;
  303. }
  304. int cam_sync_put_obj_ref(int32_t sync_obj)
  305. {
  306. struct sync_table_row *row = NULL;
  307. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  308. return -EINVAL;
  309. row = sync_dev->sync_table + sync_obj;
  310. atomic_dec(&row->ref_cnt);
  311. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  312. return 0;
  313. }
/* Destroy a sync object; row teardown is delegated to the sync util. */
int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
/*
 * Block until @sync_obj is signaled or @timeout_ms elapses.
 *
 * Returns 0 when the object signaled SUCCESS, -ETIMEDOUT on timeout,
 * -EINVAL for a bad object or a non-SUCCESS terminal state.
 *
 * NOTE(review): row->state is read here without taking the row spinlock,
 * unlike the other accessors in this file — presumably acceptable for a
 * passive waiter, but worth confirming.
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));

	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		/* Completion fired: map the terminal state to a return code. */
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
  359. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  360. {
  361. struct cam_sync_info sync_create;
  362. int result;
  363. if (k_ioctl->size != sizeof(struct cam_sync_info))
  364. return -EINVAL;
  365. if (!k_ioctl->ioctl_ptr)
  366. return -EINVAL;
  367. if (copy_from_user(&sync_create,
  368. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  369. k_ioctl->size))
  370. return -EFAULT;
  371. result = cam_sync_create(&sync_create.sync_obj,
  372. sync_create.name);
  373. if (!result)
  374. if (copy_to_user(
  375. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  376. &sync_create,
  377. k_ioctl->size))
  378. return -EFAULT;
  379. return result;
  380. }
/*
 * Ioctl handler for CAM_SYNC_SIGNAL: copy the request from userspace and
 * signal the requested sync object with the requested state.
 *
 * NOTE(review): if cam_sync_signal() fails its validity checks after the
 * extra ref is taken below, that ref is presumably never dropped —
 * verify against cam_sync_signal()'s early-error paths.
 */
static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state);
}
  404. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  405. {
  406. struct cam_sync_merge sync_merge;
  407. uint32_t *sync_objs;
  408. uint32_t num_objs;
  409. uint32_t size;
  410. int result;
  411. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  412. return -EINVAL;
  413. if (!k_ioctl->ioctl_ptr)
  414. return -EINVAL;
  415. if (copy_from_user(&sync_merge,
  416. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  417. k_ioctl->size))
  418. return -EFAULT;
  419. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  420. return -EINVAL;
  421. size = sizeof(uint32_t) * sync_merge.num_objs;
  422. sync_objs = kzalloc(size, GFP_ATOMIC);
  423. if (!sync_objs)
  424. return -ENOMEM;
  425. if (copy_from_user(sync_objs,
  426. u64_to_user_ptr(sync_merge.sync_objs),
  427. sizeof(uint32_t) * sync_merge.num_objs)) {
  428. kfree(sync_objs);
  429. return -EFAULT;
  430. }
  431. num_objs = sync_merge.num_objs;
  432. result = cam_sync_merge(sync_objs,
  433. num_objs,
  434. &sync_merge.merged);
  435. if (!result)
  436. if (copy_to_user(
  437. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  438. &sync_merge,
  439. k_ioctl->size)) {
  440. kfree(sync_objs);
  441. return -EFAULT;
  442. }
  443. kfree(sync_objs);
  444. return result;
  445. }
  446. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  447. {
  448. struct cam_sync_wait sync_wait;
  449. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  450. return -EINVAL;
  451. if (!k_ioctl->ioctl_ptr)
  452. return -EINVAL;
  453. if (copy_from_user(&sync_wait,
  454. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  455. k_ioctl->size))
  456. return -EFAULT;
  457. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  458. sync_wait.timeout_ms);
  459. return 0;
  460. }
  461. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  462. {
  463. struct cam_sync_info sync_create;
  464. if (k_ioctl->size != sizeof(struct cam_sync_info))
  465. return -EINVAL;
  466. if (!k_ioctl->ioctl_ptr)
  467. return -EINVAL;
  468. if (copy_from_user(&sync_create,
  469. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  470. k_ioctl->size))
  471. return -EFAULT;
  472. return cam_sync_destroy(sync_create.sync_obj);
  473. }
/*
 * Ioctl handler for CAM_SYNC_REGISTER_PAYLOAD: attach a userspace payload
 * to a sync object so it is echoed back in the v4l2 event when the object
 * signals.  If the object has already signaled, the event is sent
 * immediately instead of queuing the payload.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* Allocate and fill the payload copy before taking the row lock. */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the event now, nothing to queue. */
	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject duplicates: the first two payload words act as the key. */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * Ioctl handler for CAM_SYNC_DEREGISTER_PAYLOAD: remove queued payloads
 * on a sync object whose first two payload words match the request.
 * Returns 0 whether or not anything matched.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* Match on the first two payload words, same key as registration. */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * vidioc_default handler: dispatch CAM_PRIVATE_IOCTL_CMD sub-commands to
 * the cam_sync_handle_* helpers.  Note the local sync_dev deliberately
 * shadows the file-scope global of the same name.
 */
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	/* Work on a local copy; only WAIT writes a result back into *arg. */
	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
  628. static unsigned int cam_sync_poll(struct file *f,
  629. struct poll_table_struct *pll_table)
  630. {
  631. int rc = 0;
  632. struct v4l2_fh *eventq = f->private_data;
  633. if (!eventq)
  634. return -EINVAL;
  635. poll_wait(f, &eventq->wait, pll_table);
  636. if (v4l2_event_pending(eventq))
  637. rc = POLLPRI;
  638. return rc;
  639. }
  640. static int cam_sync_open(struct file *filep)
  641. {
  642. int rc;
  643. struct sync_device *sync_dev = video_drvdata(filep);
  644. if (!sync_dev) {
  645. CAM_ERR(CAM_SYNC, "Sync device NULL");
  646. return -ENODEV;
  647. }
  648. mutex_lock(&sync_dev->table_lock);
  649. if (sync_dev->open_cnt >= 1) {
  650. mutex_unlock(&sync_dev->table_lock);
  651. return -EALREADY;
  652. }
  653. rc = v4l2_fh_open(filep);
  654. if (!rc) {
  655. sync_dev->open_cnt++;
  656. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  657. sync_dev->cam_sync_eventq = filep->private_data;
  658. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  659. } else {
  660. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  661. }
  662. mutex_unlock(&sync_dev->table_lock);
  663. return rc;
  664. }
/*
 * Release handler for the sync video node.
 *
 * On last close: signal every still-ACTIVE object as ERROR, flush the
 * callback workqueue, then destroy every remaining valid row.  Errors in
 * the cleanup loops are only logged; the last one wins as return value.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Index 0 is never handed out (see the <= 0 checks above). */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}
		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);
		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	/* Detach the event queue before releasing the fh. */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);
	return rc;
}
  723. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  724. struct v4l2_event *new)
  725. {
  726. struct cam_sync_ev_header *ev_header;
  727. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  728. CAM_ERR(CAM_CRM, "Failed to notify event id %d fence %d statue %d",
  729. old->id, ev_header->sync_obj, ev_header->status);
  730. }
/* Event-queue overflow is reported via the merge hook above. */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
/* Subscribe an fh to sync events, queue bounded by CAM_SYNC_MAX_V4L2_EVENTS. */
int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}
/* Unsubscribe an fh from sync events; thin wrapper over the v4l2 core. */
int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/* All private ioctls are funneled through vidioc_default. */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

/* video_ioctl2 routes ioctls into g_cam_sync_ioctl_ops above. */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open  = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl   = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
#if defined(CONFIG_MEDIA_CONTROLLER)
/*
 * Register a media device and a pad-less entity for the sync node.
 *
 * NOTE(review): the kzalloc'd mdev is not kfree'd on the failure paths
 * here — presumably the caller's error path handles it; verify.
 */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

/* Undo everything cam_sync_media_controller_init() set up. */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

/* Name the entity after the video node once the node exists. */
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
/* Media-controller support compiled out: all three become no-ops. */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
/*
 * Create debugfs nodes: camera_sync/trigger_cb_without_switch toggles
 * direct-callback dispatch (see cam_sync_register_callback()).
 *
 * NOTE(review): if the bool entry fails, the directory created above is
 * not removed here — presumably torn down on driver exit; verify.  Also,
 * newer kernels make debugfs_create_bool() return void, where this
 * null-check would not compile — confirm against the target kernel.
 */
static int cam_sync_create_debugfs(void)
{
	sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
	if (!sync_dev->dentry) {
		CAM_ERR(CAM_SYNC, "Failed to create sync dir");
		return -ENOMEM;
	}

	if (!debugfs_create_bool("trigger_cb_without_switch",
		0644, sync_dev->dentry,
		&trigger_cb_without_switch)) {
		CAM_ERR(CAM_SYNC,
			"failed to create trigger_cb_without_switch entry");
		return -ENOMEM;
	}

	return 0;
}
  828. #ifdef CONFIG_MSM_GLOBAL_SYNX
  829. static void cam_sync_register_synx_bind_ops(void)
  830. {
  831. int rc = 0;
  832. struct synx_register_params params;
  833. params.name = CAM_SYNC_NAME;
  834. params.type = SYNX_TYPE_CSL;
  835. params.ops.register_callback = cam_sync_register_callback;
  836. params.ops.deregister_callback = cam_sync_deregister_callback;
  837. params.ops.enable_signaling = cam_sync_get_obj_ref;
  838. params.ops.signal = cam_sync_signal;
  839. rc = synx_register_ops(&params);
  840. if (rc)
  841. CAM_ERR(CAM_SYNC, "synx registration fail with %d", rc);
  842. }
  843. #endif
/*
 * Component bind: allocate the global sync_dev, register the v4l2 and
 * video devices userspace talks to, initialize the sync object table,
 * bitmap and per-row locks, and create the high-priority callback work
 * queue. Returns 0 on success or a negative errno; acquired resources are
 * unwound through the goto ladder at the bottom.
 */
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	/* One spinlock per sync-object row in the table */
	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	/* No-op when CONFIG_MEDIA_CONTROLLER is not set */
	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;

	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		/*
		 * NOTE(review): at this point video_register_device() has
		 * succeeded, but the v4l2_fail path below never calls
		 * video_unregister_device() — confirm whether the registered
		 * node is intentionally left to video_device_release().
		 */
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	/* debugfs failure is intentionally non-fatal (return value ignored) */
	cam_sync_create_debugfs();
#ifdef CONFIG_MSM_GLOBAL_SYNX
	cam_sync_register_synx_bind_ops();
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
  921. static void cam_sync_component_unbind(struct device *dev,
  922. struct device *master_dev, void *data)
  923. {
  924. int i;
  925. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  926. cam_sync_media_controller_cleanup(sync_dev);
  927. video_device_release(sync_dev->vdev);
  928. debugfs_remove_recursive(sync_dev->dentry);
  929. sync_dev->dentry = NULL;
  930. for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
  931. spin_lock_init(&sync_dev->row_spinlocks[i]);
  932. kfree(sync_dev);
  933. sync_dev = NULL;
  934. }
  935. const static struct component_ops cam_sync_component_ops = {
  936. .bind = cam_sync_component_bind,
  937. .unbind = cam_sync_component_unbind,
  938. };
  939. static int cam_sync_probe(struct platform_device *pdev)
  940. {
  941. int rc = 0;
  942. CAM_DBG(CAM_SYNC, "Adding Sync component");
  943. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  944. if (rc)
  945. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  946. return rc;
  947. }
/*
 * Platform remove: detach the sync component; actual teardown happens in
 * cam_sync_component_unbind().
 */
static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}
/* Device-tree match table: binds this driver to "qcom,cam-sync" nodes. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver definition; non-static because cam_sync_init()/exit()
 * (and possibly other translation units) reference it by name.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		/* Disallow manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};
/* Module-init helper: register the platform driver; returns 0 or errno. */
int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}
/* Module-exit helper: unregister the platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
  976. MODULE_DESCRIPTION("Camera sync driver");
  977. MODULE_LICENSE("GPL v2");