/* cam_sync.c — camera sync (fence) manager driver */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "camera_main.h"
/* Global driver state; allocated at component bind and shared by all entry points. */
struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
  25. static void cam_sync_print_fence_table(void)
  26. {
  27. int idx;
  28. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  29. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  30. CAM_INFO(CAM_SYNC,
  31. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  32. idx,
  33. sync_dev->sync_table[idx].sync_id,
  34. sync_dev->sync_table[idx].name,
  35. sync_dev->sync_table[idx].type,
  36. sync_dev->sync_table[idx].state,
  37. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  38. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  39. }
  40. }
  41. int cam_sync_create(int32_t *sync_obj, const char *name)
  42. {
  43. int rc;
  44. long idx;
  45. bool bit;
  46. do {
  47. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  48. if (idx >= CAM_SYNC_MAX_OBJS) {
  49. CAM_ERR(CAM_SYNC,
  50. "Error: Unable to create sync idx = %d reached max!",
  51. idx);
  52. cam_sync_print_fence_table();
  53. return -ENOMEM;
  54. }
  55. CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
  56. bit = test_and_set_bit(idx, sync_dev->bitmap);
  57. } while (bit);
  58. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  59. rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
  60. CAM_SYNC_TYPE_INDV);
  61. if (rc) {
  62. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  63. idx);
  64. clear_bit(idx, sync_dev->bitmap);
  65. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  66. return -EINVAL;
  67. }
  68. *sync_obj = idx;
  69. CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
  70. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  71. return rc;
  72. }
  73. int cam_sync_register_callback(sync_callback cb_func,
  74. void *userdata, int32_t sync_obj)
  75. {
  76. struct sync_callback_info *sync_cb;
  77. struct sync_table_row *row = NULL;
  78. int status = 0;
  79. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
  80. return -EINVAL;
  81. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  82. row = sync_dev->sync_table + sync_obj;
  83. if (row->state == CAM_SYNC_STATE_INVALID) {
  84. CAM_ERR(CAM_SYNC,
  85. "Error: accessing an uninitialized sync obj %d",
  86. sync_obj);
  87. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  88. return -EINVAL;
  89. }
  90. sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
  91. if (!sync_cb) {
  92. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  93. return -ENOMEM;
  94. }
  95. /* Trigger callback if sync object is already in SIGNALED state */
  96. if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
  97. row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
  98. (!row->remaining)) {
  99. if (trigger_cb_without_switch) {
  100. CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
  101. sync_obj);
  102. status = row->state;
  103. kfree(sync_cb);
  104. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  105. cb_func(sync_obj, status, userdata);
  106. } else {
  107. sync_cb->callback_func = cb_func;
  108. sync_cb->cb_data = userdata;
  109. sync_cb->sync_obj = sync_obj;
  110. INIT_WORK(&sync_cb->cb_dispatch_work,
  111. cam_sync_util_cb_dispatch);
  112. sync_cb->status = row->state;
  113. CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
  114. sync_cb->sync_obj);
  115. queue_work(sync_dev->work_queue,
  116. &sync_cb->cb_dispatch_work);
  117. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  118. }
  119. return 0;
  120. }
  121. sync_cb->callback_func = cb_func;
  122. sync_cb->cb_data = userdata;
  123. sync_cb->sync_obj = sync_obj;
  124. INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
  125. list_add_tail(&sync_cb->list, &row->callback_list);
  126. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  127. return 0;
  128. }
  129. int cam_sync_deregister_callback(sync_callback cb_func,
  130. void *userdata, int32_t sync_obj)
  131. {
  132. struct sync_table_row *row = NULL;
  133. struct sync_callback_info *sync_cb, *temp;
  134. bool found = false;
  135. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  136. return -EINVAL;
  137. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  138. row = sync_dev->sync_table + sync_obj;
  139. if (row->state == CAM_SYNC_STATE_INVALID) {
  140. CAM_ERR(CAM_SYNC,
  141. "Error: accessing an uninitialized sync obj = %d",
  142. sync_obj);
  143. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  144. return -EINVAL;
  145. }
  146. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
  147. sync_obj);
  148. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  149. if (sync_cb->callback_func == cb_func &&
  150. sync_cb->cb_data == userdata) {
  151. list_del_init(&sync_cb->list);
  152. kfree(sync_cb);
  153. found = true;
  154. }
  155. }
  156. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  157. return found ? 0 : -ENOENT;
  158. }
/*
 * cam_sync_signal() - Signal an individual sync object.
 *
 * Moves @sync_obj from ACTIVE to @status (SIGNALED_SUCCESS or
 * SIGNALED_ERROR), dispatches its registered callbacks/user payloads,
 * then propagates the signal to any merged (group) parents.
 *
 * Returns 0 on success; -EINVAL for bad object/type/status and
 * -EALREADY if the object was not in the ACTIVE state.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}
	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}
	/* Group objects are signaled implicitly via their children. */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}
	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}
	/*
	 * Only the drop of the last reference actually performs the
	 * signal; earlier holders just decrement and leave.
	 */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}
	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);
	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (list_empty(&parents_list))
		return 0;
	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		/*
		 * The child lock was dropped above, so child and parent row
		 * locks are never held at the same time.
		 */
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;
		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}
		/* Parent fires once its last outstanding child has signaled. */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}
	return 0;
}
  245. int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
  246. {
  247. int rc;
  248. long idx = 0;
  249. bool bit;
  250. int i = 0;
  251. if (!sync_obj || !merged_obj) {
  252. CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
  253. return -EINVAL;
  254. }
  255. if (num_objs <= 1) {
  256. CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
  257. return -EINVAL;
  258. }
  259. if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
  260. != num_objs) {
  261. CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
  262. return -EINVAL;
  263. }
  264. for (i = 0; i < num_objs; i++) {
  265. rc = cam_sync_check_valid(sync_obj[i]);
  266. if (rc) {
  267. CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
  268. i, sync_obj[i]);
  269. return rc;
  270. }
  271. }
  272. do {
  273. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  274. if (idx >= CAM_SYNC_MAX_OBJS)
  275. return -ENOMEM;
  276. bit = test_and_set_bit(idx, sync_dev->bitmap);
  277. } while (bit);
  278. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  279. rc = cam_sync_init_group_object(sync_dev->sync_table,
  280. idx, sync_obj,
  281. num_objs);
  282. if (rc < 0) {
  283. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  284. idx);
  285. clear_bit(idx, sync_dev->bitmap);
  286. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  287. return -EINVAL;
  288. }
  289. CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
  290. *merged_obj = idx;
  291. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  292. return 0;
  293. }
  294. int cam_sync_get_obj_ref(int32_t sync_obj)
  295. {
  296. struct sync_table_row *row = NULL;
  297. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  298. return -EINVAL;
  299. row = sync_dev->sync_table + sync_obj;
  300. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  301. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  302. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  303. CAM_ERR(CAM_SYNC,
  304. "Error: accessing an uninitialized sync obj = %d",
  305. sync_obj);
  306. return -EINVAL;
  307. }
  308. atomic_inc(&row->ref_cnt);
  309. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  310. CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
  311. return 0;
  312. }
  313. int cam_sync_put_obj_ref(int32_t sync_obj)
  314. {
  315. struct sync_table_row *row = NULL;
  316. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  317. return -EINVAL;
  318. row = sync_dev->sync_table + sync_obj;
  319. atomic_dec(&row->ref_cnt);
  320. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  321. return 0;
  322. }
/*
 * Tear down @sync_obj; full row cleanup is delegated to
 * cam_sync_deinit_object() and its return code is passed through.
 */
int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
  328. int cam_sync_check_valid(int32_t sync_obj)
  329. {
  330. struct sync_table_row *row = NULL;
  331. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  332. return -EINVAL;
  333. row = sync_dev->sync_table + sync_obj;
  334. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  335. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %d",
  336. sync_obj);
  337. return -EINVAL;
  338. }
  339. if (row->state == CAM_SYNC_STATE_INVALID) {
  340. CAM_ERR(CAM_SYNC,
  341. "Error: accessing an uninitialized sync obj = %d",
  342. sync_obj);
  343. return -EINVAL;
  344. }
  345. return 0;
  346. }
  347. int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
  348. {
  349. unsigned long timeleft;
  350. int rc = -EINVAL;
  351. struct sync_table_row *row = NULL;
  352. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  353. return -EINVAL;
  354. row = sync_dev->sync_table + sync_obj;
  355. if (row->state == CAM_SYNC_STATE_INVALID) {
  356. CAM_ERR(CAM_SYNC,
  357. "Error: accessing an uninitialized sync obj = %d",
  358. sync_obj);
  359. return -EINVAL;
  360. }
  361. timeleft = wait_for_completion_timeout(&row->signaled,
  362. msecs_to_jiffies(timeout_ms));
  363. if (!timeleft) {
  364. CAM_ERR(CAM_SYNC,
  365. "Error: timed out for sync obj = %d", sync_obj);
  366. rc = -ETIMEDOUT;
  367. } else {
  368. switch (row->state) {
  369. case CAM_SYNC_STATE_INVALID:
  370. case CAM_SYNC_STATE_ACTIVE:
  371. case CAM_SYNC_STATE_SIGNALED_ERROR:
  372. CAM_ERR(CAM_SYNC,
  373. "Error: Wait on invalid state = %d, obj = %d",
  374. row->state, sync_obj);
  375. rc = -EINVAL;
  376. break;
  377. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  378. rc = 0;
  379. break;
  380. default:
  381. rc = -EINVAL;
  382. break;
  383. }
  384. }
  385. return rc;
  386. }
  387. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  388. {
  389. struct cam_sync_info sync_create;
  390. int result;
  391. if (k_ioctl->size != sizeof(struct cam_sync_info))
  392. return -EINVAL;
  393. if (!k_ioctl->ioctl_ptr)
  394. return -EINVAL;
  395. if (copy_from_user(&sync_create,
  396. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  397. k_ioctl->size))
  398. return -EFAULT;
  399. result = cam_sync_create(&sync_create.sync_obj,
  400. sync_create.name);
  401. if (!result)
  402. if (copy_to_user(
  403. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  404. &sync_create,
  405. k_ioctl->size))
  406. return -EFAULT;
  407. return result;
  408. }
  409. static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
  410. {
  411. int rc = 0;
  412. struct cam_sync_signal sync_signal;
  413. if (k_ioctl->size != sizeof(struct cam_sync_signal))
  414. return -EINVAL;
  415. if (!k_ioctl->ioctl_ptr)
  416. return -EINVAL;
  417. if (copy_from_user(&sync_signal,
  418. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  419. k_ioctl->size))
  420. return -EFAULT;
  421. /* need to get ref for UMD signaled fences */
  422. rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
  423. if (rc) {
  424. CAM_DBG(CAM_SYNC,
  425. "Error: cannot signal an uninitialized sync obj = %d",
  426. sync_signal.sync_obj);
  427. return rc;
  428. }
  429. return cam_sync_signal(sync_signal.sync_obj,
  430. sync_signal.sync_state);
  431. }
  432. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  433. {
  434. struct cam_sync_merge sync_merge;
  435. uint32_t *sync_objs;
  436. uint32_t num_objs;
  437. uint32_t size;
  438. int result;
  439. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  440. return -EINVAL;
  441. if (!k_ioctl->ioctl_ptr)
  442. return -EINVAL;
  443. if (copy_from_user(&sync_merge,
  444. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  445. k_ioctl->size))
  446. return -EFAULT;
  447. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  448. return -EINVAL;
  449. size = sizeof(uint32_t) * sync_merge.num_objs;
  450. sync_objs = kzalloc(size, GFP_ATOMIC);
  451. if (!sync_objs)
  452. return -ENOMEM;
  453. if (copy_from_user(sync_objs,
  454. u64_to_user_ptr(sync_merge.sync_objs),
  455. sizeof(uint32_t) * sync_merge.num_objs)) {
  456. kfree(sync_objs);
  457. return -EFAULT;
  458. }
  459. num_objs = sync_merge.num_objs;
  460. result = cam_sync_merge(sync_objs,
  461. num_objs,
  462. &sync_merge.merged);
  463. if (!result)
  464. if (copy_to_user(
  465. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  466. &sync_merge,
  467. k_ioctl->size)) {
  468. kfree(sync_objs);
  469. return -EFAULT;
  470. }
  471. kfree(sync_objs);
  472. return result;
  473. }
  474. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  475. {
  476. struct cam_sync_wait sync_wait;
  477. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  478. return -EINVAL;
  479. if (!k_ioctl->ioctl_ptr)
  480. return -EINVAL;
  481. if (copy_from_user(&sync_wait,
  482. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  483. k_ioctl->size))
  484. return -EFAULT;
  485. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  486. sync_wait.timeout_ms);
  487. return 0;
  488. }
  489. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  490. {
  491. struct cam_sync_info sync_create;
  492. if (k_ioctl->size != sizeof(struct cam_sync_info))
  493. return -EINVAL;
  494. if (!k_ioctl->ioctl_ptr)
  495. return -EINVAL;
  496. if (copy_from_user(&sync_create,
  497. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  498. k_ioctl->size))
  499. return -EFAULT;
  500. return cam_sync_destroy(sync_create.sync_obj);
  501. }
/*
 * Handle CAM_SYNC_REGISTER_PAYLOAD ioctl: attach a user payload blob to
 * a sync object so it is delivered via a v4l2 event when the object
 * signals. If the object has already signaled, the event is sent
 * immediately instead of queueing the payload.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;
	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;
	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;
	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;
	/* Allocate before taking the row spinlock; GFP_KERNEL may sleep. */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;
	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;
	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}
	/* Already signaled: deliver the event right away; nothing queued. */
	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}
	/* Reject duplicates, keyed on the first two payload words. */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}
	/* Ownership of user_payload_kernel transfers to the row's list. */
	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
  566. static int cam_sync_handle_deregister_user_payload(
  567. struct cam_private_ioctl_arg *k_ioctl)
  568. {
  569. struct cam_sync_userpayload_info userpayload_info;
  570. struct sync_user_payload *user_payload_kernel, *temp;
  571. uint32_t sync_obj;
  572. struct sync_table_row *row = NULL;
  573. if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
  574. CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
  575. return -EINVAL;
  576. }
  577. if (!k_ioctl->ioctl_ptr) {
  578. CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
  579. return -EINVAL;
  580. }
  581. if (copy_from_user(&userpayload_info,
  582. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  583. k_ioctl->size))
  584. return -EFAULT;
  585. sync_obj = userpayload_info.sync_obj;
  586. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  587. return -EINVAL;
  588. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  589. row = sync_dev->sync_table + sync_obj;
  590. if (row->state == CAM_SYNC_STATE_INVALID) {
  591. CAM_ERR(CAM_SYNC,
  592. "Error: accessing an uninitialized sync obj = %d",
  593. sync_obj);
  594. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  595. return -EINVAL;
  596. }
  597. list_for_each_entry_safe(user_payload_kernel, temp,
  598. &row->user_payload_list, list) {
  599. if (user_payload_kernel->payload_data[0] ==
  600. userpayload_info.payload[0] &&
  601. user_payload_kernel->payload_data[1] ==
  602. userpayload_info.payload[1]) {
  603. list_del_init(&user_payload_kernel->list);
  604. kfree(user_payload_kernel);
  605. }
  606. }
  607. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  608. return 0;
  609. }
  610. static long cam_sync_dev_ioctl(struct file *filep, void *fh,
  611. bool valid_prio, unsigned int cmd, void *arg)
  612. {
  613. int32_t rc;
  614. struct sync_device *sync_dev = video_drvdata(filep);
  615. struct cam_private_ioctl_arg k_ioctl;
  616. if (!sync_dev) {
  617. CAM_ERR(CAM_SYNC, "sync_dev NULL");
  618. return -EINVAL;
  619. }
  620. if (!arg)
  621. return -EINVAL;
  622. if (cmd != CAM_PRIVATE_IOCTL_CMD)
  623. return -ENOIOCTLCMD;
  624. k_ioctl = *(struct cam_private_ioctl_arg *)arg;
  625. switch (k_ioctl.id) {
  626. case CAM_SYNC_CREATE:
  627. rc = cam_sync_handle_create(&k_ioctl);
  628. break;
  629. case CAM_SYNC_DESTROY:
  630. rc = cam_sync_handle_destroy(&k_ioctl);
  631. break;
  632. case CAM_SYNC_REGISTER_PAYLOAD:
  633. rc = cam_sync_handle_register_user_payload(
  634. &k_ioctl);
  635. break;
  636. case CAM_SYNC_DEREGISTER_PAYLOAD:
  637. rc = cam_sync_handle_deregister_user_payload(
  638. &k_ioctl);
  639. break;
  640. case CAM_SYNC_SIGNAL:
  641. rc = cam_sync_handle_signal(&k_ioctl);
  642. break;
  643. case CAM_SYNC_MERGE:
  644. rc = cam_sync_handle_merge(&k_ioctl);
  645. break;
  646. case CAM_SYNC_WAIT:
  647. rc = cam_sync_handle_wait(&k_ioctl);
  648. ((struct cam_private_ioctl_arg *)arg)->result =
  649. k_ioctl.result;
  650. break;
  651. default:
  652. rc = -ENOIOCTLCMD;
  653. }
  654. return rc;
  655. }
  656. static unsigned int cam_sync_poll(struct file *f,
  657. struct poll_table_struct *pll_table)
  658. {
  659. int rc = 0;
  660. struct v4l2_fh *eventq = f->private_data;
  661. if (!eventq)
  662. return -EINVAL;
  663. poll_wait(f, &eventq->wait, pll_table);
  664. if (v4l2_event_pending(eventq))
  665. rc = POLLPRI;
  666. return rc;
  667. }
  668. static int cam_sync_open(struct file *filep)
  669. {
  670. int rc;
  671. struct sync_device *sync_dev = video_drvdata(filep);
  672. if (!sync_dev) {
  673. CAM_ERR(CAM_SYNC, "Sync device NULL");
  674. return -ENODEV;
  675. }
  676. mutex_lock(&sync_dev->table_lock);
  677. if (sync_dev->open_cnt >= 1) {
  678. mutex_unlock(&sync_dev->table_lock);
  679. return -EALREADY;
  680. }
  681. rc = v4l2_fh_open(filep);
  682. if (!rc) {
  683. sync_dev->open_cnt++;
  684. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  685. sync_dev->cam_sync_eventq = filep->private_data;
  686. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  687. } else {
  688. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  689. }
  690. mutex_unlock(&sync_dev->table_lock);
  691. return rc;
  692. }
/*
 * Device release: on last close, error-signal every ACTIVE object,
 * flush pending callback work, destroy all remaining objects, and
 * detach the event queue.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Index 0 is never handed out (sync_obj <= 0 is rejected
		 * everywhere), so cleanup starts at 1. */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}
		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);
		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;
			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	/* Detach the event queue so no further events are delivered. */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);
	return rc;
}
  751. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  752. struct v4l2_event *new)
  753. {
  754. struct cam_sync_ev_header *ev_header;
  755. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  756. CAM_ERR(CAM_CRM, "Failed to notify event id %d fence %d statue %d",
  757. old->id, ev_header->sync_obj, ev_header->status);
  758. }
/* Event ops: the .merge hook is used to report dropped (overflowed) events. */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
/*
 * Subscribe @fh to sync v4l2 events with a queue bounded at
 * CAM_SYNC_MAX_V4L2_EVENTS entries.
 */
int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}
/* Unsubscribe @fh from sync v4l2 events. */
int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/* ioctl ops: event (un)subscription plus the private command mux. */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
/* File operations for the sync video node; ioctls route via video_ioctl2. */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
  788. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  789. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  790. struct platform_device *pdev)
  791. {
  792. int rc;
  793. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  794. GFP_KERNEL);
  795. if (!sync_dev->v4l2_dev.mdev)
  796. return -ENOMEM;
  797. media_device_init(sync_dev->v4l2_dev.mdev);
  798. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  799. sizeof(sync_dev->v4l2_dev.mdev->model));
  800. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  801. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  802. if (rc < 0)
  803. goto register_fail;
  804. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  805. if (rc < 0)
  806. goto entity_fail;
  807. return 0;
  808. entity_fail:
  809. media_device_unregister(sync_dev->v4l2_dev.mdev);
  810. register_fail:
  811. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  812. return rc;
  813. }
/* Undo cam_sync_media_controller_init() in reverse order. */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}
  821. static void cam_sync_init_entity(struct sync_device *sync_dev)
  822. {
  823. sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
  824. sync_dev->vdev->entity.name =
  825. video_device_node_name(sync_dev->vdev);
  826. }
  827. #else
  828. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  829. struct platform_device *pdev)
  830. {
  831. return 0;
  832. }
  833. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  834. {
  835. }
  836. static void cam_sync_init_entity(struct sync_device *sync_dev)
  837. {
  838. }
  839. #endif
  840. static int cam_sync_create_debugfs(void)
  841. {
  842. sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
  843. if (!sync_dev->dentry) {
  844. CAM_ERR(CAM_SYNC, "Failed to create sync dir");
  845. return -ENOMEM;
  846. }
  847. if (!debugfs_create_bool("trigger_cb_without_switch",
  848. 0644, sync_dev->dentry,
  849. &trigger_cb_without_switch)) {
  850. CAM_ERR(CAM_SYNC,
  851. "failed to create trigger_cb_without_switch entry");
  852. return -ENOMEM;
  853. }
  854. return 0;
  855. }
  856. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  857. static int cam_sync_register_synx_bind_ops(void)
  858. {
  859. int rc = 0;
  860. struct synx_register_params params;
  861. params.name = CAM_SYNC_NAME;
  862. params.type = SYNX_TYPE_CSL;
  863. params.ops.register_callback = cam_sync_register_callback;
  864. params.ops.deregister_callback = cam_sync_deregister_callback;
  865. params.ops.enable_signaling = cam_sync_get_obj_ref;
  866. params.ops.signal = cam_sync_signal;
  867. rc = synx_register_ops(&params);
  868. if (rc)
  869. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  870. return rc;
  871. }
  872. #endif
  873. static int cam_sync_component_bind(struct device *dev,
  874. struct device *master_dev, void *data)
  875. {
  876. int rc;
  877. int idx;
  878. struct platform_device *pdev = to_platform_device(dev);
  879. sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
  880. if (!sync_dev)
  881. return -ENOMEM;
  882. mutex_init(&sync_dev->table_lock);
  883. spin_lock_init(&sync_dev->cam_sync_eventq_lock);
  884. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
  885. spin_lock_init(&sync_dev->row_spinlocks[idx]);
  886. sync_dev->vdev = video_device_alloc();
  887. if (!sync_dev->vdev) {
  888. rc = -ENOMEM;
  889. goto vdev_fail;
  890. }
  891. rc = cam_sync_media_controller_init(sync_dev, pdev);
  892. if (rc < 0)
  893. goto mcinit_fail;
  894. sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
  895. rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
  896. if (rc < 0)
  897. goto register_fail;
  898. strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
  899. sizeof(sync_dev->vdev->name));
  900. sync_dev->vdev->release = video_device_release_empty;
  901. sync_dev->vdev->fops = &cam_sync_v4l2_fops;
  902. sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
  903. sync_dev->vdev->minor = -1;
  904. sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
  905. sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;
  906. rc = video_register_device(sync_dev->vdev,
  907. VFL_TYPE_GRABBER, -1);
  908. if (rc < 0) {
  909. CAM_ERR(CAM_SYNC,
  910. "video device registration failure rc = %d, name = %s, device_caps = %d",
  911. rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
  912. goto v4l2_fail;
  913. }
  914. cam_sync_init_entity(sync_dev);
  915. video_set_drvdata(sync_dev->vdev, sync_dev);
  916. memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
  917. memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
  918. bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  919. /*
  920. * We treat zero as invalid handle, so we will keep the 0th bit set
  921. * always
  922. */
  923. set_bit(0, sync_dev->bitmap);
  924. sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
  925. WQ_HIGHPRI | WQ_UNBOUND, 1);
  926. if (!sync_dev->work_queue) {
  927. CAM_ERR(CAM_SYNC,
  928. "Error: high priority work queue creation failed");
  929. rc = -ENOMEM;
  930. goto v4l2_fail;
  931. }
  932. trigger_cb_without_switch = false;
  933. cam_sync_create_debugfs();
  934. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  935. CAM_INFO(CAM_SYNC, "Registering with synx driver");
  936. rc = cam_sync_register_synx_bind_ops();
  937. if (rc)
  938. goto v4l2_fail;
  939. #endif
  940. CAM_DBG(CAM_SYNC, "Component bound successfully");
  941. return rc;
  942. v4l2_fail:
  943. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  944. register_fail:
  945. cam_sync_media_controller_cleanup(sync_dev);
  946. mcinit_fail:
  947. video_unregister_device(sync_dev->vdev);
  948. video_device_release(sync_dev->vdev);
  949. vdev_fail:
  950. mutex_destroy(&sync_dev->table_lock);
  951. kfree(sync_dev);
  952. return rc;
  953. }
  954. static void cam_sync_component_unbind(struct device *dev,
  955. struct device *master_dev, void *data)
  956. {
  957. int i;
  958. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  959. cam_sync_media_controller_cleanup(sync_dev);
  960. video_unregister_device(sync_dev->vdev);
  961. video_device_release(sync_dev->vdev);
  962. debugfs_remove_recursive(sync_dev->dentry);
  963. sync_dev->dentry = NULL;
  964. for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
  965. spin_lock_init(&sync_dev->row_spinlocks[i]);
  966. kfree(sync_dev);
  967. sync_dev = NULL;
  968. }
  969. const static struct component_ops cam_sync_component_ops = {
  970. .bind = cam_sync_component_bind,
  971. .unbind = cam_sync_component_unbind,
  972. };
  973. static int cam_sync_probe(struct platform_device *pdev)
  974. {
  975. int rc = 0;
  976. CAM_DBG(CAM_SYNC, "Adding Sync component");
  977. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  978. if (rc)
  979. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  980. return rc;
  981. }
/* Platform remove: detach from the component framework; always succeeds. */
static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver glue. Non-static: presumably referenced from another
 * translation unit (e.g. the camera umbrella module) -- TODO confirm
 * before narrowing the linkage.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Register the cam_sync platform driver; returns the core's result. */
int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}
/* Unregister the cam_sync platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
  1010. MODULE_DESCRIPTION("Camera sync driver");
  1011. MODULE_LICENSE("GPL v2");