cam_sync.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>

#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"

#ifdef CONFIG_MSM_GLOBAL_SYNX
#include <synx_api.h>
#endif

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue the cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
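
/*
 * Allocate a new individual sync object: grab the first free slot in
 * the bitmap (retrying if another context races us on
 * test_and_set_bit), then initialize its table row under the per-row
 * spinlock.
 */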
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return rc;
}
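
/*
 * Attach a callback to a sync object. If the object has already
 * signaled (and, for merged objects, no children remain), the callback
 * fires immediately: inline when trigger_cb_without_switch is set,
 * otherwise from the high-priority workqueue. Illustrative usage only;
 * real callers live elsewhere in the camera stack:
 *
 *	static void my_cb(int32_t obj, int status, void *data) { ... }
 *	rc = cam_sync_register_callback(my_cb, ctx, sync_obj);
 */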
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
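
/*
 * Remove a previously registered callback. Matching is on the
 * (cb_func, userdata) pair; every matching entry is unlinked and
 * freed.
 */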
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
		sync_obj);
	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if (sync_cb->callback_func == cb_func &&
			sync_cb->cb_data == userdata) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return found ? 0 : -ENOENT;
}
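
/*
 * Signal an individual (non-GROUP) sync object. The signal takes hold
 * only once ref_cnt drops to zero; then the final state is recorded,
 * registered callbacks are dispatched, and every parent GROUP object
 * has its remaining-count decremented under its own row lock, firing
 * the parent's callbacks once no children are left.
 */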
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}

	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and, if they too need
	 * to be signaled, dispatch their cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
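
/*
 * Merge several sync objects into one GROUP object that signals only
 * when every child has signaled. Duplicate fences in the input list
 * are rejected.
 */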
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
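
/*
 * Row reference counting: cam_sync_signal() takes effect only once
 * ref_cnt drops to zero, so an extra reference held across a UMD
 * signal keeps the object alive until both sides are done. Note the
 * put path relies on atomic_dec alone and takes no row spinlock.
 */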
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
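
/*
 * Block (up to timeout_ms) for a sync object to signal. Only a final
 * SIGNALED_SUCCESS state maps to 0; an ERROR signal, or any state that
 * should be impossible after completion, reports -EINVAL.
 */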
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));
	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
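
/*
 * Private ioctl handlers. Each one validates the embedded payload size
 * and pointer, copies the payload in from user space, and forwards to
 * the kernel API above. A hypothetical user-space caller (names
 * illustrative; the real UMD wrapper may differ) might do:
 *
 *	struct cam_sync_info info = { .name = "my_fence" };
 *	struct cam_private_ioctl_arg arg = {
 *		.id = CAM_SYNC_CREATE,
 *		.size = sizeof(info),
 *		.ioctl_ptr = (__u64)&info,
 *	};
 *	ioctl(fd, CAM_PRIVATE_IOCTL_CMD, &arg);
 */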
static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	result = cam_sync_create(&sync_create.sync_obj,
		sync_create.name);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_create,
			k_ioctl->size))
			return -EFAULT;

	return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_merge sync_merge;
	uint32_t *sync_objs;
	uint32_t num_objs;
	uint32_t size;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_merge))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_merge,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	size = sizeof(uint32_t) * sync_merge.num_objs;
	sync_objs = kzalloc(size, GFP_ATOMIC);
	if (!sync_objs)
		return -ENOMEM;

	if (copy_from_user(sync_objs,
		u64_to_user_ptr(sync_merge.sync_objs),
		sizeof(uint32_t) * sync_merge.num_objs)) {
		kfree(sync_objs);
		return -EFAULT;
	}

	num_objs = sync_merge.num_objs;

	result = cam_sync_merge(sync_objs,
		num_objs,
		&sync_merge.merged);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_merge,
			k_ioctl->size)) {
			kfree(sync_objs);
			return -EFAULT;
		}

	kfree(sync_objs);
	return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_wait sync_wait;

	if (k_ioctl->size != sizeof(struct cam_sync_wait))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_wait,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
		sync_wait.timeout_ms);

	return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	return cam_sync_destroy(sync_create.sync_obj);
}
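
/*
 * Queue a user payload against a sync object. If the object has
 * already signaled, the payload is echoed back immediately as a v4l2
 * event instead of being queued; duplicate payloads (matched on the
 * first two payload words) are rejected with -EALREADY.
 */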
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}

static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
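
/*
 * v4l2 vidioc_default hook: demultiplex private ioctls on k_ioctl.id.
 * The handlers operate on a local copy of the arg, so only the
 * CAM_SYNC_WAIT path copies its result field back to the caller's
 * struct.
 */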
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}

static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
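
/*
 * Device open/close. Only a single open is allowed (-EALREADY on a
 * second attempt); close force-signals every ACTIVE object as ERROR,
 * drains the workqueue, and then destroys all remaining rows.
 */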
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}

static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from
			 * logging it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks
		 * to finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callback worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}

static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
	struct v4l2_event *new)
{
	struct cam_sync_ev_header *ev_header;

	ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
	CAM_ERR(CAM_CRM, "Failed to notify event id %d fence %d status %d",
		old->id, ev_header->sync_obj, ev_header->status);
}

static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};

int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

#if defined(CONFIG_MEDIA_CONTROLLER)
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif

static int cam_sync_create_debugfs(void)
{
	sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);

	if (!sync_dev->dentry) {
		CAM_ERR(CAM_SYNC, "Failed to create sync dir");
		return -ENOMEM;
	}

	if (!debugfs_create_bool("trigger_cb_without_switch",
		0644, sync_dev->dentry,
		&trigger_cb_without_switch)) {
		CAM_ERR(CAM_SYNC,
			"failed to create trigger_cb_without_switch entry");
		return -ENOMEM;
	}

	return 0;
}

#ifdef CONFIG_MSM_GLOBAL_SYNX
static void cam_sync_register_synx_bind_ops(void)
{
	int rc = 0;
	struct synx_register_params params;

	params.name = CAM_SYNC_NAME;
	params.type = SYNX_TYPE_CSL;
	params.ops.register_callback = cam_sync_register_callback;
	params.ops.deregister_callback = cam_sync_deregister_callback;
	params.ops.enable_signaling = cam_sync_get_obj_ref;
	params.ops.signal = cam_sync_signal;

	rc = synx_register_ops(&params);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with %d", rc);
}
#endif
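
/*
 * Probe: allocate the device, init all row spinlocks, register the
 * v4l2 video device, zero the object table, and create the
 * signal-dispatch workqueue. Bit 0 of the bitmap stays set permanently
 * because handle 0 is treated as invalid throughout.
 */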
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;
	int idx;

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;

	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0)
		goto v4l2_fail;

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as an invalid handle, so we will keep the 0th bit
	 * set always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();
#ifdef CONFIG_MSM_GLOBAL_SYNX
	cam_sync_register_synx_bind_ops();
#endif

	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}

static int cam_sync_remove(struct platform_device *pdev)
{
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
	video_device_release(sync_dev->vdev);
	debugfs_remove_recursive(sync_dev->dentry);
	sync_dev->dentry = NULL;
	kfree(sync_dev);
	sync_dev = NULL;

	return 0;
}

static struct platform_device cam_sync_device = {
	.name = "cam_sync",
	.id = -1,
};

static struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
};

static int __init cam_sync_init(void)
{
	int rc;

	rc = platform_device_register(&cam_sync_device);
	if (rc)
		return -ENODEV;

	return platform_driver_register(&cam_sync_driver);
}

static void __exit cam_sync_exit(void)
{
	int idx;

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);
	platform_driver_unregister(&cam_sync_driver);
	platform_device_unregister(&cam_sync_device);
	kfree(sync_dev);
}

module_init(cam_sync_init);
module_exit(cam_sync_exit);
MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");