// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>

#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"

#ifdef CONFIG_MSM_GLOBAL_SYNX
#include <synx_api.h>
#endif

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;

static void cam_sync_print_fence_table(void)
{
	int idx;

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
		spin_lock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_INFO(CAM_SYNC,
			"index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
			idx,
			sync_dev->sync_table[idx].sync_id,
			sync_dev->sync_table[idx].name,
			sync_dev->sync_table[idx].type,
			sync_dev->sync_table[idx].state,
			atomic_read(&sync_dev->sync_table[idx].ref_cnt));
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	}
}
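
/*
 * Allocate a free slot from the global bitmap and initialize it as an
 * individual (non-group) sync object. find_first_zero_bit() and
 * test_and_set_bit() race against concurrent creators, so the loop
 * retries until a bit is actually claimed atomically.
 */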
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %ld reached max!",
				idx);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return rc;
}
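
/*
 * Register a callback against a sync object. If the object is already
 * signaled (and no group children remain), the callback fires right away:
 * inline when the trigger_cb_without_switch debugfs knob is set, otherwise
 * via the workqueue. Unsignaled objects get the callback queued on the
 * row's callback_list for dispatch at signal time.
 */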
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
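
/*
 * Remove a previously registered callback. An entry must match on both
 * the function pointer and the userdata cookie; -ENOENT is returned if
 * nothing matched.
 */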
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
		sync_obj);

	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if (sync_cb->callback_func == cb_func &&
			sync_cb->cb_data == userdata) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return found ? 0 : -ENOENT;
}
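
/*
 * Signal an individual sync object and propagate the result to any merged
 * (group) parents. The parents_list is spliced onto a local list first so
 * the child's row lock can be dropped before any parent lock is taken,
 * which avoids holding two row spinlocks at once.
 */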
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}

	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
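
/*
 * Merge an array of sync objects into one group object. Duplicates are
 * rejected up front because a repeated child would corrupt the group's
 * 'remaining' count.
 */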
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
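
/*
 * Take an extra reference on an ACTIVE object. cam_sync_signal() drops one
 * reference per call and only dispatches callbacks once the count hits
 * zero, so every getter must eventually be balanced by a signal or a put.
 */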
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
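
/*
 * Block until the object completes or the timeout expires. Note that a
 * fence signaled with CAM_SYNC_STATE_SIGNALED_ERROR is reported as -EINVAL
 * here, not as success.
 */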
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));
	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
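
/*
 * Private-ioctl handlers. Each validates the embedded size and pointer,
 * copies the payload in from user space, and forwards to the kernel API
 * above, copying results back out where the UAPI struct carries them.
 */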
static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	result = cam_sync_create(&sync_create.sync_obj,
		sync_create.name);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_create,
			k_ioctl->size))
			return -EFAULT;

	return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_merge sync_merge;
	uint32_t *sync_objs;
	uint32_t num_objs;
	uint32_t size;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_merge))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_merge,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	size = sizeof(uint32_t) * sync_merge.num_objs;
	sync_objs = kzalloc(size, GFP_ATOMIC);
	if (!sync_objs)
		return -ENOMEM;

	if (copy_from_user(sync_objs,
		u64_to_user_ptr(sync_merge.sync_objs),
		sizeof(uint32_t) * sync_merge.num_objs)) {
		kfree(sync_objs);
		return -EFAULT;
	}

	num_objs = sync_merge.num_objs;

	result = cam_sync_merge(sync_objs,
		num_objs,
		&sync_merge.merged);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_merge,
			k_ioctl->size)) {
			kfree(sync_objs);
			return -EFAULT;
		}

	kfree(sync_objs);
	return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_wait sync_wait;

	if (k_ioctl->size != sizeof(struct cam_sync_wait))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_wait,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
		sync_wait.timeout_ms);

	return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	return cam_sync_destroy(sync_create.sync_obj);
}
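
/*
 * Attach a two-word user payload to a sync object. If the object is
 * already signaled, the payload is echoed back immediately as a v4l2
 * event instead of being queued; a duplicate payload on the same row
 * returns -EALREADY.
 */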
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}

static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
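
/*
 * vidioc_default handler: dispatches CAM_PRIVATE_IOCTL_CMD sub-commands.
 * The ioctl argument is copied to a local so the handlers work on a
 * kernel copy; only CAM_SYNC_WAIT writes a result back to the caller's
 * struct.
 */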
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}

static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
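
/*
 * Only one userspace client may hold the device open at a time; the open
 * count is protected by table_lock and a second open returns -EALREADY.
 */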
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
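
/*
 * On the last close, force-signal every ACTIVE object as ERROR, flush the
 * callback workqueue, then destroy any remaining rows so no stale state
 * survives into the next session.
 */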
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}

	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from
			 * logging it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks
		 * to finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callback worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}

static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
	struct v4l2_event *new)
{
	struct cam_sync_ev_header *ev_header;

	ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
	CAM_ERR(CAM_CRM, "Failed to notify event id %d fence %d status %d",
		old->id, ev_header->sync_obj, ev_header->status);
}

static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};

int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

#if defined(CONFIG_MEDIA_CONTROLLER)
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
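
/*
 * Expose trigger_cb_without_switch under debugfs. The error check on
 * debugfs_create_bool() assumes the older dentry-returning API this
 * driver targets; newer kernels return void there.
 */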
static int cam_sync_create_debugfs(void)
{
	sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);

	if (!sync_dev->dentry) {
		CAM_ERR(CAM_SYNC, "Failed to create sync dir");
		return -ENOMEM;
	}

	if (!debugfs_create_bool("trigger_cb_without_switch",
		0644, sync_dev->dentry,
		&trigger_cb_without_switch)) {
		CAM_ERR(CAM_SYNC,
			"failed to create trigger_cb_without_switch entry");
		return -ENOMEM;
	}

	return 0;
}

#ifdef CONFIG_MSM_GLOBAL_SYNX
static void cam_sync_register_synx_bind_ops(void)
{
	int rc = 0;
	struct synx_register_params params;

	params.name = CAM_SYNC_NAME;
	params.type = SYNX_TYPE_CSL;
	params.ops.register_callback = cam_sync_register_callback;
	params.ops.deregister_callback = cam_sync_deregister_callback;
	params.ops.enable_signaling = cam_sync_get_obj_ref;
	params.ops.signal = cam_sync_signal;

	rc = synx_register_ops(&params);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with %d", rc);
}
#endif
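
/*
 * Probe: set up the locks, v4l2/media-controller plumbing, the sync table
 * bitmap (bit 0 stays set so handle 0 is never handed out), and a
 * high-priority workqueue (max_active = 1) for callback dispatch.
 */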
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;
	int idx;

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;

	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();
#ifdef CONFIG_MSM_GLOBAL_SYNX
	cam_sync_register_synx_bind_ops();
#endif

	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
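
/*
 * Remove: unwind probe in reverse order. Re-initializing the row
 * spinlocks just before the free is effectively a no-op, as the table
 * is released immediately afterwards.
 */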
static int cam_sync_remove(struct platform_device *pdev)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
	video_device_release(sync_dev->vdev);
	debugfs_remove_recursive(sync_dev->dentry);
	sync_dev->dentry = NULL;

	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	kfree(sync_dev);
	sync_dev = NULL;

	return 0;
}

static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};

MODULE_DEVICE_TABLE(of, cam_sync_dt_match);

static struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};

int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");