  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/irqflags.h>
  8. #include <linux/module.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/debugfs.h>
  11. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  12. #include <synx_api.h>
  13. #endif
  14. #include "cam_sync_util.h"
  15. #include "cam_debug_util.h"
  16. #include "cam_common_util.h"
  17. #include "cam_compat.h"
  18. #include "camera_main.h"
  19. #include "cam_req_mgr_workq.h"
/* Global sync device state; allocated and initialized at probe time. */
struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
  27. static void cam_sync_print_fence_table(void)
  28. {
  29. int idx;
  30. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  31. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  32. CAM_INFO(CAM_SYNC,
  33. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  34. idx,
  35. sync_dev->sync_table[idx].sync_id,
  36. sync_dev->sync_table[idx].name,
  37. sync_dev->sync_table[idx].type,
  38. sync_dev->sync_table[idx].state,
  39. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  40. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  41. }
  42. }
  43. int cam_sync_create(int32_t *sync_obj, const char *name)
  44. {
  45. int rc;
  46. long idx;
  47. bool bit;
  48. do {
  49. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  50. if (idx >= CAM_SYNC_MAX_OBJS) {
  51. CAM_ERR(CAM_SYNC,
  52. "Error: Unable to create sync idx = %d reached max!",
  53. idx);
  54. cam_sync_print_fence_table();
  55. return -ENOMEM;
  56. }
  57. CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
  58. bit = test_and_set_bit(idx, sync_dev->bitmap);
  59. } while (bit);
  60. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  61. rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
  62. CAM_SYNC_TYPE_INDV);
  63. if (rc) {
  64. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  65. idx);
  66. clear_bit(idx, sync_dev->bitmap);
  67. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  68. return -EINVAL;
  69. }
  70. *sync_obj = idx;
  71. CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
  72. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  73. return rc;
  74. }
/*
 * cam_sync_register_callback() - attach a kernel callback to a sync object.
 * @cb_func:  callback to invoke when the object signals
 * @userdata: opaque pointer handed back to @cb_func
 * @sync_obj: target sync object id (1..CAM_SYNC_MAX_OBJS-1)
 *
 * If the object is already signaled (and has no remaining children), the
 * callback fires immediately — either inline or via the work queue,
 * depending on trigger_cb_without_switch. Otherwise the callback is queued
 * on the row's callback_list for dispatch at signal time.
 *
 * Returns 0 on success, -EINVAL for a bad id/state, -ENOMEM on allocation
 * failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* GFP_ATOMIC: we are holding a BH-disabled spinlock here */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			/* Snapshot state, then drop the lock before calling
			 * out to client code to avoid lock recursion.
			 */
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			/* Timestamp used to measure workq scheduling delay */
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}

		return 0;
	}

	/* Not signaled yet: park the callback on the row's list */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
  133. int cam_sync_deregister_callback(sync_callback cb_func,
  134. void *userdata, int32_t sync_obj)
  135. {
  136. struct sync_table_row *row = NULL;
  137. struct sync_callback_info *sync_cb, *temp;
  138. bool found = false;
  139. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  140. return -EINVAL;
  141. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  142. row = sync_dev->sync_table + sync_obj;
  143. if (row->state == CAM_SYNC_STATE_INVALID) {
  144. CAM_ERR(CAM_SYNC,
  145. "Error: accessing an uninitialized sync obj = %d",
  146. sync_obj);
  147. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  148. return -EINVAL;
  149. }
  150. CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
  151. sync_obj);
  152. list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
  153. if (sync_cb->callback_func == cb_func &&
  154. sync_cb->cb_data == userdata) {
  155. list_del_init(&sync_cb->list);
  156. kfree(sync_cb);
  157. found = true;
  158. }
  159. }
  160. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  161. return found ? 0 : -ENOENT;
  162. }
/*
 * cam_sync_signal() - signal an individual sync object and propagate the
 * result to any merged (group) parents.
 * @sync_obj:    object to signal (must be CAM_SYNC_TYPE_INDV and ACTIVE)
 * @status:      one of the CAM_SYNC_STATE_SIGNALED_* states
 * @event_cause: reason code forwarded to listeners
 *
 * The object is only actually signaled when its ref count drops to zero
 * (see atomic_dec_and_test below); earlier calls just release a reference.
 *
 * Returns 0 on success (including the deferred-by-refcount case),
 * -EINVAL for bad id/state/status, -EALREADY if already signaled.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	/* Group objects are signaled implicitly by their children */
	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d event reason = %u",
			status, event_cause);
		return -EINVAL;
	}

	/* Only the last reference holder performs the actual signal */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		/* Per-parent lock: parents may be signaled concurrently
		 * by this child's siblings.
		 */
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* Parent fires once its last outstanding child signals */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
/*
 * cam_sync_merge() - create a GROUP sync object that signals only after
 * every object in @sync_obj has signaled.
 * @sync_obj:   array of child object ids (deduplicated in place below)
 * @num_objs:   number of entries in @sync_obj, must be > 1
 * @merged_obj: out parameter, receives the new group object id
 *
 * Returns 0 on success, -EINVAL on bad input/duplicates/init failure,
 * -ENOMEM when the table is full.
 */
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	/* A child appearing twice would corrupt the remaining count */
	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	/* Every child must be a live, initialized object */
	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	/* Race-free slot claim: retry until test_and_set_bit wins */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
/*
 * cam_sync_get_obj_ref() - take a reference on an ACTIVE sync object.
 * @sync_obj: object id
 *
 * Each reference defers the actual signal in cam_sync_signal() by one
 * atomic_dec_and_test() step.
 *
 * NOTE(review): this function uses spin_lock()/spin_unlock() while the
 * rest of the file uses the _bh variants on the same row_spinlocks —
 * confirm this asymmetry is intentional before relying on it.
 *
 * Returns 0 on success, -EINVAL if the id is bad or the object is not
 * in ACTIVE state.
 */
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}
  319. int cam_sync_put_obj_ref(int32_t sync_obj)
  320. {
  321. struct sync_table_row *row = NULL;
  322. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  323. return -EINVAL;
  324. row = sync_dev->sync_table + sync_obj;
  325. atomic_dec(&row->ref_cnt);
  326. CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
  327. return 0;
  328. }
/*
 * cam_sync_destroy() - tear down a sync object and release its table row.
 * @sync_obj: object id
 *
 * Thin wrapper; validation and cleanup live in cam_sync_deinit_object().
 */
int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
  334. int cam_sync_check_valid(int32_t sync_obj)
  335. {
  336. struct sync_table_row *row = NULL;
  337. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  338. return -EINVAL;
  339. row = sync_dev->sync_table + sync_obj;
  340. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  341. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %d",
  342. sync_obj);
  343. return -EINVAL;
  344. }
  345. if (row->state == CAM_SYNC_STATE_INVALID) {
  346. CAM_ERR(CAM_SYNC,
  347. "Error: accessing an uninitialized sync obj = %d",
  348. sync_obj);
  349. return -EINVAL;
  350. }
  351. return 0;
  352. }
/*
 * cam_sync_wait() - block until @sync_obj signals or @timeout_ms elapses.
 * @sync_obj:   object id to wait on
 * @timeout_ms: timeout in milliseconds
 *
 * NOTE(review): row->state is read here without taking the row spinlock;
 * presumably the completion acts as the ordering point — confirm against
 * the signal path before changing.
 *
 * Returns 0 if the object signaled SUCCESS, -ETIMEDOUT on timeout,
 * -EINVAL for a bad id or a non-success terminal state.
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));

	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		/* Completed: map the terminal state to a return code */
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
  394. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  395. {
  396. struct cam_sync_info sync_create;
  397. int result;
  398. if (k_ioctl->size != sizeof(struct cam_sync_info))
  399. return -EINVAL;
  400. if (!k_ioctl->ioctl_ptr)
  401. return -EINVAL;
  402. if (copy_from_user(&sync_create,
  403. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  404. k_ioctl->size))
  405. return -EFAULT;
  406. result = cam_sync_create(&sync_create.sync_obj,
  407. sync_create.name);
  408. if (!result)
  409. if (copy_to_user(
  410. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  411. &sync_create,
  412. k_ioctl->size))
  413. return -EFAULT;
  414. return result;
  415. }
/*
 * cam_sync_handle_signal() - ioctl handler for CAM_SYNC_SIGNAL.
 *
 * Takes an extra reference before signaling; cam_sync_signal() consumes
 * one reference per call and only signals when the count reaches zero,
 * so the get/signal pair balances for UMD-signaled fences.
 *
 * Returns 0 on success, -EINVAL/-EFAULT on bad args, or the error from
 * cam_sync_get_obj_ref()/cam_sync_signal().
 */
static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state,
		CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}
  440. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  441. {
  442. struct cam_sync_merge sync_merge;
  443. uint32_t *sync_objs;
  444. uint32_t num_objs;
  445. uint32_t size;
  446. int result;
  447. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  448. return -EINVAL;
  449. if (!k_ioctl->ioctl_ptr)
  450. return -EINVAL;
  451. if (copy_from_user(&sync_merge,
  452. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  453. k_ioctl->size))
  454. return -EFAULT;
  455. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  456. return -EINVAL;
  457. size = sizeof(uint32_t) * sync_merge.num_objs;
  458. sync_objs = kzalloc(size, GFP_ATOMIC);
  459. if (!sync_objs)
  460. return -ENOMEM;
  461. if (copy_from_user(sync_objs,
  462. u64_to_user_ptr(sync_merge.sync_objs),
  463. sizeof(uint32_t) * sync_merge.num_objs)) {
  464. kfree(sync_objs);
  465. return -EFAULT;
  466. }
  467. num_objs = sync_merge.num_objs;
  468. result = cam_sync_merge(sync_objs,
  469. num_objs,
  470. &sync_merge.merged);
  471. if (!result)
  472. if (copy_to_user(
  473. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  474. &sync_merge,
  475. k_ioctl->size)) {
  476. kfree(sync_objs);
  477. return -EFAULT;
  478. }
  479. kfree(sync_objs);
  480. return result;
  481. }
  482. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  483. {
  484. struct cam_sync_wait sync_wait;
  485. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  486. return -EINVAL;
  487. if (!k_ioctl->ioctl_ptr)
  488. return -EINVAL;
  489. if (copy_from_user(&sync_wait,
  490. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  491. k_ioctl->size))
  492. return -EFAULT;
  493. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  494. sync_wait.timeout_ms);
  495. return 0;
  496. }
  497. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  498. {
  499. struct cam_sync_info sync_create;
  500. if (k_ioctl->size != sizeof(struct cam_sync_info))
  501. return -EINVAL;
  502. if (!k_ioctl->ioctl_ptr)
  503. return -EINVAL;
  504. if (copy_from_user(&sync_create,
  505. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  506. k_ioctl->size))
  507. return -EFAULT;
  508. return cam_sync_destroy(sync_create.sync_obj);
  509. }
/*
 * cam_sync_handle_register_user_payload() - ioctl handler for
 * CAM_SYNC_REGISTER_PAYLOAD.
 *
 * Attaches a user payload to a sync object so it is delivered via a v4l2
 * event when the object signals. If the object is already signaled, the
 * event is sent immediately. Duplicate payloads (same first two words)
 * are rejected with -EALREADY.
 *
 * Returns 0 on success, -EINVAL/-EFAULT on bad args, -ENOMEM on
 * allocation failure, -EALREADY on duplicate registration.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	/* Allocate before taking the spinlock so GFP_KERNEL is safe */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: deliver the event right away, nothing to park */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {

		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject duplicates, keyed on the first two payload words */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {

			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
  576. static int cam_sync_handle_deregister_user_payload(
  577. struct cam_private_ioctl_arg *k_ioctl)
  578. {
  579. struct cam_sync_userpayload_info userpayload_info;
  580. struct sync_user_payload *user_payload_kernel, *temp;
  581. uint32_t sync_obj;
  582. struct sync_table_row *row = NULL;
  583. if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
  584. CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
  585. return -EINVAL;
  586. }
  587. if (!k_ioctl->ioctl_ptr) {
  588. CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
  589. return -EINVAL;
  590. }
  591. if (copy_from_user(&userpayload_info,
  592. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  593. k_ioctl->size))
  594. return -EFAULT;
  595. sync_obj = userpayload_info.sync_obj;
  596. if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
  597. return -EINVAL;
  598. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  599. row = sync_dev->sync_table + sync_obj;
  600. if (row->state == CAM_SYNC_STATE_INVALID) {
  601. CAM_ERR(CAM_SYNC,
  602. "Error: accessing an uninitialized sync obj = %d",
  603. sync_obj);
  604. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  605. return -EINVAL;
  606. }
  607. list_for_each_entry_safe(user_payload_kernel, temp,
  608. &row->user_payload_list, list) {
  609. if (user_payload_kernel->payload_data[0] ==
  610. userpayload_info.payload[0] &&
  611. user_payload_kernel->payload_data[1] ==
  612. userpayload_info.payload[1]) {
  613. list_del_init(&user_payload_kernel->list);
  614. kfree(user_payload_kernel);
  615. }
  616. }
  617. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  618. return 0;
  619. }
/*
 * cam_sync_dev_ioctl() - v4l2 private-ioctl dispatcher for the sync device.
 *
 * Copies the cam_private_ioctl_arg from @arg (already in kernel space via
 * the v4l2 framework), routes by k_ioctl.id, and for CAM_SYNC_WAIT copies
 * the wait result back into the caller's arg.
 *
 * Returns the handler's code, or -ENOIOCTLCMD for unknown commands.
 */
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	/* Local shadows the global on purpose; fetched from drvdata */
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		/* Wait reports its outcome through .result, not rc */
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
/*
 * cam_sync_poll() - poll handler; reports POLLPRI when a v4l2 event is
 * pending on the file's event queue.
 *
 * NOTE(review): the return type is unsigned int but -EINVAL is returned
 * on a missing eventq, which callers will see as a large positive mask —
 * confirm whether this matches the v4l2 framework's expectation.
 */
static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
  678. static int cam_sync_open(struct file *filep)
  679. {
  680. int rc;
  681. struct sync_device *sync_dev = video_drvdata(filep);
  682. if (!sync_dev) {
  683. CAM_ERR(CAM_SYNC, "Sync device NULL");
  684. return -ENODEV;
  685. }
  686. mutex_lock(&sync_dev->table_lock);
  687. if (sync_dev->open_cnt >= 1) {
  688. mutex_unlock(&sync_dev->table_lock);
  689. return -EALREADY;
  690. }
  691. rc = v4l2_fh_open(filep);
  692. if (!rc) {
  693. sync_dev->open_cnt++;
  694. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  695. sync_dev->cam_sync_eventq = filep->private_data;
  696. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  697. } else {
  698. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  699. }
  700. mutex_unlock(&sync_dev->table_lock);
  701. return rc;
  702. }
/*
 * cam_sync_close() - file release handler.
 *
 * On the last close: (1) signal every still-ACTIVE object as ERROR so
 * waiters/parents unwind, (2) flush the work queue so in-flight callbacks
 * finish, then (3) destroy every remaining object. Finally the event
 * queue pointer is cleared and the v4l2 file handle released.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Row 0 is never allocated, so start cleanup at index 1 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);

	/* Unpublish the event queue before releasing the file handle */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
  762. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  763. struct v4l2_event *new)
  764. {
  765. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  766. struct cam_sync_ev_header_v2 *ev_header;
  767. ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
  768. CAM_ERR(CAM_CRM,
  769. "Failed to notify event id %d fence %d statue %d reason %u %u %u %u",
  770. old->id, ev_header->sync_obj, ev_header->status,
  771. ev_header->evt_param[0], ev_header->evt_param[1],
  772. ev_header->evt_param[2], ev_header->evt_param[3]);
  773. } else {
  774. struct cam_sync_ev_header *ev_header;
  775. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  776. CAM_ERR(CAM_CRM,
  777. "Failed to notify event id %d fence %d statue %d",
  778. old->id, ev_header->sync_obj, ev_header->status);
  779. }
  780. }
/* Event ops: .merge is called on queue overflow to report dropped events */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
  784. int cam_sync_subscribe_event(struct v4l2_fh *fh,
  785. const struct v4l2_event_subscription *sub)
  786. {
  787. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  788. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  789. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  790. return -EINVAL;
  791. }
  792. sync_dev->version = sub->type;
  793. CAM_DBG(CAM_SYNC, "Sync event verion type 0x%x", sync_dev->version);
  794. return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
  795. &cam_sync_v4l2_ops);
  796. }
  797. int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
  798. const struct v4l2_event_subscription *sub)
  799. {
  800. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  801. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  802. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  803. return -EINVAL;
  804. }
  805. return v4l2_event_unsubscribe(fh, sub);
  806. }
/* ioctl dispatch: event (un)subscription plus the private CAM_SYNC ioctls,
 * which all arrive through .vidioc_default.
 */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
  812. static struct v4l2_file_operations cam_sync_v4l2_fops = {
  813. .owner = THIS_MODULE,
  814. .open = cam_sync_open,
  815. .release = cam_sync_close,
  816. .poll = cam_sync_poll,
  817. .unlocked_ioctl = video_ioctl2,
  818. #ifdef CONFIG_COMPAT
  819. .compat_ioctl32 = video_ioctl2,
  820. #endif
  821. };
  822. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  823. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  824. struct platform_device *pdev)
  825. {
  826. int rc;
  827. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  828. GFP_KERNEL);
  829. if (!sync_dev->v4l2_dev.mdev)
  830. return -ENOMEM;
  831. media_device_init(sync_dev->v4l2_dev.mdev);
  832. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  833. sizeof(sync_dev->v4l2_dev.mdev->model));
  834. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  835. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  836. if (rc < 0)
  837. goto register_fail;
  838. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  839. if (rc < 0)
  840. goto entity_fail;
  841. return 0;
  842. entity_fail:
  843. media_device_unregister(sync_dev->v4l2_dev.mdev);
  844. register_fail:
  845. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  846. return rc;
  847. }
  848. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  849. {
  850. media_entity_cleanup(&sync_dev->vdev->entity);
  851. media_device_unregister(sync_dev->v4l2_dev.mdev);
  852. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  853. kfree(sync_dev->v4l2_dev.mdev);
  854. }
  855. static void cam_sync_init_entity(struct sync_device *sync_dev)
  856. {
  857. sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
  858. sync_dev->vdev->entity.name =
  859. video_device_node_name(sync_dev->vdev);
  860. }
  861. #else
  862. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  863. struct platform_device *pdev)
  864. {
  865. return 0;
  866. }
  867. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  868. {
  869. }
  870. static void cam_sync_init_entity(struct sync_device *sync_dev)
  871. {
  872. }
  873. #endif
  874. static int cam_sync_create_debugfs(void)
  875. {
  876. int rc = 0;
  877. struct dentry *dbgfileptr = NULL;
  878. dbgfileptr = debugfs_create_dir("camera_sync", NULL);
  879. if (!dbgfileptr) {
  880. CAM_ERR(CAM_SYNC,"DebugFS could not create directory!");
  881. rc = -ENOENT;
  882. goto end;
  883. }
  884. /* Store parent inode for cleanup in caller */
  885. sync_dev->dentry = dbgfileptr;
  886. dbgfileptr = debugfs_create_bool("trigger_cb_without_switch", 0644,
  887. sync_dev->dentry, &trigger_cb_without_switch);
  888. if (IS_ERR(dbgfileptr)) {
  889. if (PTR_ERR(dbgfileptr) == -ENODEV)
  890. CAM_WARN(CAM_SYNC, "DebugFS not enabled in kernel!");
  891. else
  892. rc = PTR_ERR(dbgfileptr);
  893. }
  894. end:
  895. return rc;
  896. }
  897. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  898. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  899. {
  900. int rc = 0;
  901. uint32_t sync_status = synx_status;
  902. switch (synx_status) {
  903. case SYNX_STATE_ACTIVE:
  904. sync_status = CAM_SYNC_STATE_ACTIVE;
  905. break;
  906. case SYNX_STATE_SIGNALED_SUCCESS:
  907. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  908. break;
  909. case SYNX_STATE_SIGNALED_ERROR:
  910. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  911. break;
  912. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  913. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  914. break;
  915. default:
  916. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  917. synx_status, sync_obj);
  918. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  919. break;
  920. }
  921. rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
  922. if (rc) {
  923. CAM_ERR(CAM_SYNC,
  924. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  925. sync_obj, synx_status, sync_status, rc);
  926. }
  927. return rc;
  928. }
  929. static int cam_sync_register_synx_bind_ops(
  930. struct synx_register_params *object)
  931. {
  932. int rc = 0;
  933. rc = synx_register_ops(object);
  934. if (rc)
  935. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  936. return rc;
  937. }
  938. static void cam_sync_unregister_synx_bind_ops(
  939. struct synx_register_params *object)
  940. {
  941. int rc = 0;
  942. rc = synx_deregister_ops(object);
  943. if (rc)
  944. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  945. }
  946. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  947. {
  948. struct synx_register_params *params = object;
  949. params->name = CAM_SYNC_NAME;
  950. params->type = SYNX_TYPE_CSL;
  951. params->ops.register_callback = cam_sync_register_callback;
  952. params->ops.deregister_callback = cam_sync_deregister_callback;
  953. params->ops.enable_signaling = cam_sync_get_obj_ref;
  954. params->ops.signal = cam_synx_sync_signal;
  955. }
  956. #endif
  957. static int cam_sync_component_bind(struct device *dev,
  958. struct device *master_dev, void *data)
  959. {
  960. int rc;
  961. int idx;
  962. struct platform_device *pdev = to_platform_device(dev);
  963. sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
  964. if (!sync_dev)
  965. return -ENOMEM;
  966. mutex_init(&sync_dev->table_lock);
  967. spin_lock_init(&sync_dev->cam_sync_eventq_lock);
  968. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
  969. spin_lock_init(&sync_dev->row_spinlocks[idx]);
  970. sync_dev->vdev = video_device_alloc();
  971. if (!sync_dev->vdev) {
  972. rc = -ENOMEM;
  973. goto vdev_fail;
  974. }
  975. rc = cam_sync_media_controller_init(sync_dev, pdev);
  976. if (rc < 0)
  977. goto mcinit_fail;
  978. sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
  979. rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
  980. if (rc < 0)
  981. goto register_fail;
  982. strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
  983. sizeof(sync_dev->vdev->name));
  984. sync_dev->vdev->release = video_device_release_empty;
  985. sync_dev->vdev->fops = &cam_sync_v4l2_fops;
  986. sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
  987. sync_dev->vdev->minor = -1;
  988. sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
  989. sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;
  990. rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
  991. if (rc < 0) {
  992. CAM_ERR(CAM_SYNC,
  993. "video device registration failure rc = %d, name = %s, device_caps = %d",
  994. rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
  995. goto v4l2_fail;
  996. }
  997. cam_sync_init_entity(sync_dev);
  998. video_set_drvdata(sync_dev->vdev, sync_dev);
  999. memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
  1000. memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
  1001. bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  1002. /*
  1003. * We treat zero as invalid handle, so we will keep the 0th bit set
  1004. * always
  1005. */
  1006. set_bit(0, sync_dev->bitmap);
  1007. sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
  1008. WQ_HIGHPRI | WQ_UNBOUND, 1);
  1009. if (!sync_dev->work_queue) {
  1010. CAM_ERR(CAM_SYNC,
  1011. "Error: high priority work queue creation failed");
  1012. rc = -ENOMEM;
  1013. goto v4l2_fail;
  1014. }
  1015. trigger_cb_without_switch = false;
  1016. cam_sync_create_debugfs();
  1017. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  1018. CAM_DBG(CAM_SYNC, "Registering with synx driver");
  1019. cam_sync_configure_synx_obj(&sync_dev->params);
  1020. rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
  1021. if (rc)
  1022. goto v4l2_fail;
  1023. #endif
  1024. CAM_DBG(CAM_SYNC, "Component bound successfully");
  1025. return rc;
  1026. v4l2_fail:
  1027. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  1028. register_fail:
  1029. cam_sync_media_controller_cleanup(sync_dev);
  1030. mcinit_fail:
  1031. video_unregister_device(sync_dev->vdev);
  1032. video_device_release(sync_dev->vdev);
  1033. vdev_fail:
  1034. mutex_destroy(&sync_dev->table_lock);
  1035. kfree(sync_dev);
  1036. return rc;
  1037. }
  1038. static void cam_sync_component_unbind(struct device *dev,
  1039. struct device *master_dev, void *data)
  1040. {
  1041. int i;
  1042. v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
  1043. cam_sync_media_controller_cleanup(sync_dev);
  1044. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  1045. cam_sync_unregister_synx_bind_ops(&sync_dev->params);
  1046. #endif
  1047. video_unregister_device(sync_dev->vdev);
  1048. video_device_release(sync_dev->vdev);
  1049. debugfs_remove_recursive(sync_dev->dentry);
  1050. sync_dev->dentry = NULL;
  1051. for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
  1052. spin_lock_init(&sync_dev->row_spinlocks[i]);
  1053. kfree(sync_dev);
  1054. sync_dev = NULL;
  1055. }
  1056. const static struct component_ops cam_sync_component_ops = {
  1057. .bind = cam_sync_component_bind,
  1058. .unbind = cam_sync_component_unbind,
  1059. };
  1060. static int cam_sync_probe(struct platform_device *pdev)
  1061. {
  1062. int rc = 0;
  1063. CAM_DBG(CAM_SYNC, "Adding Sync component");
  1064. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  1065. if (rc)
  1066. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  1067. return rc;
  1068. }
  1069. static int cam_sync_remove(struct platform_device *pdev)
  1070. {
  1071. component_del(&pdev->dev, &cam_sync_component_ops);
  1072. return 0;
  1073. }
/* Device-tree match table: bound against "qcom,cam-sync" nodes. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};

MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/* Platform driver descriptor; non-static because cam_sync_init()/exit()
 * may be referenced from another translation unit of this module.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		/* Userspace must not unbind this core service via sysfs */
		.suppress_bind_attrs = true,
	},
};
  1089. int cam_sync_init(void)
  1090. {
  1091. return platform_driver_register(&cam_sync_driver);
  1092. }
/* Module-level exit: unregister the platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
/* Module metadata */
MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");