cam_sync.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
#include <synx_api.h>
#endif
#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"
#include "cam_compat.h"
#include "camera_main.h"
#include "cam_req_mgr_workq.h"

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue the cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;

static void cam_sync_print_fence_table(void)
{
	int idx;

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
		spin_lock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_INFO(CAM_SYNC,
			"index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
			idx,
			sync_dev->sync_table[idx].sync_id,
			sync_dev->sync_table[idx].name,
			sync_dev->sync_table[idx].type,
			sync_dev->sync_table[idx].state,
			atomic_read(&sync_dev->sync_table[idx].ref_cnt));
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	}
}
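
/*
 * Reserve a free row in the sync table and initialize it as an individual
 * sync object. The find_first_zero_bit()/test_and_set_bit() loop retries
 * until this context wins the race for a free slot, so no global lock is
 * needed for allocation. If dma_sync_create_info is provided, the new sync
 * object is also associated with the caller's dma fence under the row lock.
 */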
static int cam_sync_create_util(
	int32_t *sync_obj, const char *name,
	struct cam_dma_fence_create_sync_obj_payload *dma_sync_create_info)
{
	int rc;
	long idx;
	bool bit;
	struct sync_table_row *row = NULL;

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %ld sync name = %s reached max!",
				idx, name);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;

	/* Associate sync obj with dma fence if any, holding the sync lock */
	if (dma_sync_create_info) {
		row = sync_dev->sync_table + idx;
		row->dma_fence_info.dma_fence_fd = dma_sync_create_info->fd;
		row->dma_fence_info.dma_fence_row_idx = dma_sync_create_info->dma_fence_row_idx;
		row->dma_fence_info.sync_created_with_dma =
			dma_sync_create_info->sync_created_with_dma;
		set_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask);

		/* Association refcnt for non-import cases */
		if (dma_sync_create_info->sync_created_with_dma) {
			rc = cam_dma_fence_get_put_ref(true, row->dma_fence_info.dma_fence_row_idx);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to getref on dma fence idx: %u fd: %d sync_obj: %d rc: %d",
					row->dma_fence_info.dma_fence_row_idx,
					row->dma_fence_info.dma_fence_fd,
					*sync_obj, rc);
				goto end;
			}
		}

		CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with dma fence fd: %d",
			name, *sync_obj, dma_sync_create_info->fd);
		goto end;
	}

	CAM_DBG(CAM_SYNC, "sync_obj: %s[%d]", name, *sync_obj);

end:
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	return rc;
}

int cam_sync_create(int32_t *sync_obj, const char *name)
{
	return cam_sync_create_util(sync_obj, name, NULL);
}
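
/*
 * Register a kernel callback on a sync object. If the object has already
 * been signaled (and no children remain pending), the callback is either
 * invoked inline or bounced through the workq, depending on
 * trigger_cb_without_switch; otherwise it is queued on the row's
 * callback_list and dispatched when the object signals.
 */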
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
				row->name,
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]",
				row->name,
				sync_cb->sync_obj);
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}

int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
		row->name,
		sync_obj);

	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if (sync_cb->callback_func == cb_func &&
			sync_cb->cb_data == userdata) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return found ? 0 : -ENOENT;
}
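
/*
 * Translate a sync object signal state into the errno carried by the
 * associated dma fence: 0 for success, -EADV for error, -ECANCELED for
 * cancellation, then signal the fence through the dma fence layer.
 */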
static inline int cam_sync_signal_dma_fence_util(
	struct sync_table_row *row, uint32_t status)
{
	struct cam_dma_fence_signal signal_dma_fence;

	signal_dma_fence.dma_fence_fd = row->dma_fence_info.dma_fence_fd;

	switch (status) {
	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
		signal_dma_fence.status = 0;
		break;
	case CAM_SYNC_STATE_SIGNALED_ERROR:
		/* Advertise error */
		signal_dma_fence.status = -EADV;
		break;
	case CAM_SYNC_STATE_SIGNALED_CANCEL:
		signal_dma_fence.status = -ECANCELED;
		break;
	default:
		CAM_ERR(CAM_SYNC,
			"Signaling undefined status: %d for sync obj: %d",
			status, row->sync_id);
		return -EINVAL;
	}

	return cam_dma_fence_internal_signal(row->dma_fence_info.dma_fence_row_idx,
		&signal_dma_fence);
}
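
/*
 * Propagate a child's signal to every parent (merged) object on the list:
 * decrement the parent's pending count, fold the child status into the
 * parent state, and dispatch the parent's callbacks once no children
 * remain. Entries are freed as they are consumed.
 */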
static void cam_sync_signal_parent_util(int32_t status,
	uint32_t event_cause, struct list_head *parents_list)
{
	int rc;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;

	/*
	 * Now iterate over all parents of this object and, if they too
	 * need to be signaled, dispatch their callbacks
	 */
	list_for_each_entry_safe(parent_info, temp_parent_info,
		parents_list, list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}
}

static int cam_sync_signal_validate_util(
	int32_t sync_obj, int32_t status)
{
	struct sync_table_row *row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name, sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %s[%d]",
			row->name, sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %s[%d]",
			row->name, sync_obj);
		return -EALREADY;
	}

	if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
		(status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d", status);
		return -EINVAL;
	}

	return 0;
}
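
/*
 * Signal an individual sync object. Under the row lock this validates the
 * request, drops the caller's reference (only the final reference actually
 * signals), records the new state, signals any associated dma fence first
 * so external waiters can make progress, and dispatches registered
 * callbacks. The parent list is spliced onto a local list so parents can
 * be walked after the child's lock is released, avoiding nested row locks.
 */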
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);

	rc = cam_sync_signal_validate_util(sync_obj, status);
	if (rc) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Failed to validate signal info for sync_obj = %s[%d] with status = %d rc = %d",
			row->name, sync_obj, status, rc);
		return rc;
	}

	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;

	/*
	 * Signal associated dma fence first - external entities
	 * waiting on this fence can start processing
	 */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		rc = cam_sync_signal_dma_fence_util(row, status);
		if (rc)
			CAM_ERR(CAM_SYNC,
				"Error: Failed to signal associated dma fence fd = %d for sync_obj = %s[%d]",
				row->dma_fence_info.dma_fence_fd, row->name, sync_obj);
	}

	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	cam_sync_signal_parent_util(status, event_cause, &parents_list);

	return 0;
}
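
/*
 * Merge num_objs individual sync objects into one GROUP object. Duplicates
 * are rejected up front, every input is validated, and a free row is then
 * claimed with the same bitmap retry loop used for creation. The merged
 * object signals only when all of its children have signaled.
 */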
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;
	int i = 0;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	for (i = 0; i < num_objs; i++) {
		rc = cam_sync_check_valid(sync_obj[i]);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
				i, sync_obj[i]);
			return rc;
		}
	}

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}

int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_destroy(int32_t sync_obj)
{
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj, NULL);
}

int cam_sync_check_valid(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (!test_bit(sync_obj, sync_dev->bitmap)) {
		CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	return 0;
}
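
/*
 * Block until the sync object signals or timeout_ms elapses. Only a
 * SIGNALED_SUCCESS final state yields 0; error/cancel states and waits
 * that complete on an invalid or still-active object return -EINVAL,
 * and expiry of the timeout returns -ETIMEDOUT.
 */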
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		return -EINVAL;
	}

	timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));
	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
		rc = -ETIMEDOUT;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d, name = %s",
				row->state, sync_obj, row->name);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}

static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_create.name[SYNC_DEBUG_NAME_LEN] = '\0';
	result = cam_sync_create(&sync_create.sync_obj,
		sync_create.name);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_create,
			k_ioctl->size))
			return -EFAULT;

	return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state,
		CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_merge sync_merge;
	uint32_t *sync_objs;
	uint32_t num_objs;
	uint32_t size;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_merge))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_merge,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	size = sizeof(uint32_t) * sync_merge.num_objs;
	sync_objs = kzalloc(size, GFP_ATOMIC);
	if (!sync_objs)
		return -ENOMEM;

	if (copy_from_user(sync_objs,
		u64_to_user_ptr(sync_merge.sync_objs),
		sizeof(uint32_t) * sync_merge.num_objs)) {
		kfree(sync_objs);
		return -EFAULT;
	}

	num_objs = sync_merge.num_objs;

	result = cam_sync_merge(sync_objs,
		num_objs,
		&sync_merge.merged);
	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_merge,
			k_ioctl->size)) {
			kfree(sync_objs);
			return -EFAULT;
		}

	kfree(sync_objs);
	return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_wait sync_wait;

	if (k_ioctl->size != sizeof(struct cam_sync_wait))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_wait,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
		sync_wait.timeout_ms);

	return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	return cam_sync_destroy(sync_create.sync_obj);
}

static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
				user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
				user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}

static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
				userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
				userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
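
/*
 * Callback invoked by the dma fence layer when an imported fence signals.
 * It mirrors cam_sync_signal(): validate the association (fence mask and
 * fd match), map the fence errno back to a sync state, and propagate the
 * signal to callbacks and parent objects. -ECANCELED maps to
 * SIGNALED_CANCEL, any other negative status to SIGNALED_ERROR.
 */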
static int cam_sync_dma_fence_cb(
	int32_t sync_obj,
	struct cam_dma_fence_signal_sync_obj *signal_sync_obj)
{
	int32_t rc = 0;
	int32_t status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
	struct sync_table_row *row = NULL;
	struct list_head parents_list;

	if (!signal_sync_obj) {
		CAM_ERR(CAM_SYNC, "Invalid signal info args");
		return -EINVAL;
	}

	/* Validate sync object range */
	if (!(sync_obj > 0 && sync_obj < CAM_SYNC_MAX_OBJS)) {
		CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
		return -EINVAL;
	}

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	/* Validate if sync obj has a dma fence association */
	if (!test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		CAM_ERR(CAM_SYNC,
			"sync obj = %d[%s] has no associated dma fence ext_fence_mask = 0x%x",
			sync_obj, row->name, row->ext_fence_mask);
		rc = -EINVAL;
		goto end;
	}

	/* Validate if we are signaling the right sync obj based on dma fence fd */
	if (row->dma_fence_info.dma_fence_fd != signal_sync_obj->fd) {
		CAM_ERR(CAM_SYNC,
			"sync obj: %d[%s] is associated with a different fd: %d, signaling for fd: %d",
			sync_obj, row->name, row->dma_fence_info.dma_fence_fd, signal_sync_obj->fd);
		rc = -EINVAL;
		goto end;
	}

	/* Check for error status */
	if (signal_sync_obj->status < 0) {
		if (signal_sync_obj->status == -ECANCELED)
			status = CAM_SYNC_STATE_SIGNALED_CANCEL;
		else
			status = CAM_SYNC_STATE_SIGNALED_ERROR;
	}

	rc = cam_sync_signal_validate_util(sync_obj, status);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d",
			sync_obj, row->name, status, rc);
		goto end;
	}

	if (!atomic_dec_and_test(&row->ref_cnt))
		goto end;

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, 0);

	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	cam_sync_signal_parent_util(status, 0x0, &parents_list);
	return 0;

end:
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
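
/*
 * Copy and validate a batched fence request from userspace. The payload is
 * a cam_generic_fence_input_info header followed by a flexible array of
 * cam_generic_fence_config entries, so the exact size must be
 * sizeof(header) + (num_fences - 1) * sizeof(config); anything else is
 * rejected before the array is walked.
 */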
static int cam_generic_fence_alloc_validate_input_info_util(
	struct cam_generic_fence_cmd_args *fence_cmd_args,
	struct cam_generic_fence_input_info **fence_input_info)
{
	int rc = 0;
	struct cam_generic_fence_input_info *fence_input = NULL;
	uint32_t num_fences;
	size_t expected_size;

	*fence_input_info = NULL;

	if (fence_cmd_args->input_data_size <
		sizeof(struct cam_generic_fence_input_info)) {
		CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx",
			sizeof(struct cam_generic_fence_input_info),
			fence_cmd_args->input_data_size);
		return -EINVAL;
	}

	fence_input = memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_cmd_args->input_data_size);
	if (IS_ERR_OR_NULL(fence_input)) {
		CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		return -ENOMEM;
	}

	/* Validate num fences */
	num_fences = fence_input->num_fences_requested;
	if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) {
		CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching",
			num_fences);
		rc = -EINVAL;
		goto free_mem;
	}

	/* Validate sizes */
	expected_size = sizeof(struct cam_generic_fence_input_info) +
		((num_fences - 1) * sizeof(struct cam_generic_fence_config));
	if ((uint32_t)expected_size != fence_cmd_args->input_data_size) {
		CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u",
			expected_size, fence_cmd_args->input_data_size, num_fences);
		rc = -EINVAL;
		goto free_mem;
	}

	*fence_input_info = fence_input;
	return rc;

free_mem:
	kfree(fence_input);
	return rc;
}

static void cam_generic_fence_free_input_info_util(
	struct cam_generic_fence_input_info **fence_input_info)
{
	struct cam_generic_fence_input_info *fence_input = *fence_input_info;

	kfree(fence_input);
	*fence_input_info = NULL;
}

static int cam_generic_fence_handle_dma_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = 0, i, dma_fence_row_idx;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
			&dma_fence_row_idx, fence_cfg->name);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to create dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"Created dma_fence @ i: %d fence fd: %d[%s] num fences [requested: %u processed: %u]",
			i, fence_cfg->dma_fence_fd, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_dma_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = 0, i;
	bool failed = false;
	struct cam_dma_fence_release_params release_params;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		release_params.use_row_idx = false;
		release_params.u.dma_fence_fd = fence_cfg->dma_fence_fd;

		rc = cam_dma_fence_release(&release_params);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to destroy dma fence at index: %d fd: %d rc: %d num fences [requested: %u processed: %u]",
				i, fence_cfg->dma_fence_fd, rc,
				fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;

			/* Continue to release other fences, but mark the call as failed */
			failed = true;
			continue;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"Released dma_fence @ i: %d fd: %d num fences [requested: %u processed: %u]",
			i, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_dma_import(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int32_t rc = 0, i, dma_fence_row_idx;
	struct dma_fence *fence = NULL;
	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		/* Check if fd is for a valid dma fence */
		fence = cam_dma_fence_get_fence_from_fd(fence_cfg->dma_fence_fd,
			&dma_fence_row_idx);
		if (IS_ERR_OR_NULL(fence)) {
			CAM_ERR(CAM_DMA_FENCE,
				"Invalid dma fence for fd: %d", fence_cfg->dma_fence_fd);
			fence_cfg->reason_code = -EINVAL;
			goto out_copy;
		}

		dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
		dma_sync_create.fd = fence_cfg->dma_fence_fd;
		dma_sync_create.sync_created_with_dma = false;

		/* Create new sync object and associate dma fence */
		rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
			&dma_sync_create);
		if (rc) {
			fence_cfg->reason_code = rc;

			/* put on the import refcnt */
			cam_dma_fence_get_put_ref(false, dma_fence_row_idx);
			goto out_copy;
		}

		/* Register a cb for dma fence */
		rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
			&dma_fence_row_idx, cam_sync_dma_fence_cb);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
				fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);
			cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj, NULL);
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"dma fence fd = %d imported for sync_obj = %d[%s] num fences [requested: %u processed: %u]",
			fence_cfg->dma_fence_fd, fence_cfg->sync_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_dma_signal(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	struct cam_dma_fence_signal signal_dma_fence;

	if (fence_cmd_args->input_data_size < sizeof(struct cam_dma_fence_signal)) {
		CAM_ERR(CAM_DMA_FENCE, "Size is invalid expected: 0x%llx actual: 0x%llx",
			sizeof(struct cam_dma_fence_signal),
			fence_cmd_args->input_data_size);
		return -EINVAL;
	}

	if (copy_from_user(&signal_dma_fence, (void __user *)fence_cmd_args->input_handle,
		fence_cmd_args->input_data_size))
		return -EFAULT;

	return cam_dma_fence_signal_fd(&signal_dma_fence);
}

static int cam_generic_fence_process_dma_fence_cmd(
	uint32_t id,
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = -EINVAL;

	switch (id) {
	case CAM_GENERIC_FENCE_CREATE:
		rc = cam_generic_fence_handle_dma_create(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_RELEASE:
		rc = cam_generic_fence_handle_dma_release(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_IMPORT:
		rc = cam_generic_fence_handle_dma_import(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_SIGNAL:
		rc = cam_generic_fence_handle_dma_signal(fence_cmd_args);
		break;
	default:
		CAM_ERR(CAM_DMA_FENCE, "IOCTL cmd: %u not supported for dma fence", id);
		break;
	}

	return rc;
}

static int cam_generic_fence_handle_sync_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = 0, i, dma_fence_row_idx;
	bool dma_fence_created;
	struct cam_dma_fence_release_params release_params;
	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		/* Reset flag */
		dma_fence_created = false;

		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
			(unsigned long *)&fence_cfg->fence_sel_mask)) {
			rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
				&dma_fence_row_idx, fence_cfg->name);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to create dma fence at index: %d rc: %d num_fences: %u",
					i, rc, fence_input_info->num_fences_requested);
				fence_cfg->reason_code = rc;
				goto out_copy;
			}

			dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
			dma_sync_create.fd = fence_cfg->dma_fence_fd;
			dma_sync_create.sync_created_with_dma = true;
			dma_fence_created = true;
		}

		rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
			(dma_fence_created ? &dma_sync_create : NULL));
		if (rc) {
			fence_cfg->reason_code = rc;

			if (dma_fence_created) {
				release_params.use_row_idx = true;
				release_params.u.dma_row_idx = dma_fence_row_idx;
				cam_dma_fence_release(&release_params);
			}

			CAM_ERR(CAM_SYNC,
				"Failed to create sync obj at index: %d rc: %d num_fences: %u",
				i, rc, fence_input_info->num_fences_requested);
			goto out_copy;
		}

		/* Register dma fence cb */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
			(unsigned long *)&fence_cfg->fence_sel_mask)) {
			rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
				&dma_fence_row_idx, cam_sync_dma_fence_cb);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);

				/* Destroy sync obj */
				cam_sync_deinit_object(
					sync_dev->sync_table, fence_cfg->sync_obj, NULL);

				/* Release dma fence */
				release_params.use_row_idx = true;
				release_params.u.dma_row_idx = dma_fence_row_idx;
				cam_dma_fence_release(&release_params);

				fence_cfg->reason_code = rc;
				goto out_copy;
			}
		}

		CAM_DBG(CAM_SYNC,
			"Created sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_sync_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	bool failed = false;
	int rc = 0, i;
	struct cam_sync_check_for_dma_release check_for_dma_release;
	struct cam_dma_fence_release_params release_params;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;

		/* Reset fields */
		fence_cfg->reason_code = 0;
		check_for_dma_release.sync_created_with_dma = false;
		check_for_dma_release.dma_fence_fd = fence_cfg->dma_fence_fd;

		rc = cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
			&check_for_dma_release);
		if (rc) {
			fence_cfg->reason_code = rc;
			failed = true;
			CAM_ERR(CAM_SYNC,
				"Failed to release sync obj at index: %d rc: %d num_fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
		}

		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
			(unsigned long *)&fence_cfg->fence_sel_mask)) {
			if (!check_for_dma_release.sync_created_with_dma) {
				CAM_ERR(CAM_SYNC,
					"Failed to release dma fence fd: %d with sync_obj: %d, not created together",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj);
				failed = true;
				fence_cfg->reason_code = -EPERM;
				continue;
			}

			release_params.use_row_idx = true;
			release_params.u.dma_row_idx = check_for_dma_release.dma_fence_row_idx;

			rc = cam_dma_fence_release(&release_params);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to destroy dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
					i, rc, fence_input_info->num_fences_requested,
					fence_input_info->num_fences_processed);
				fence_cfg->reason_code = rc;
				failed = true;
				continue;
			}
		}

		CAM_DBG(CAM_SYNC,
			"Released sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_process_sync_obj_cmd(
	uint32_t id,
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = -EINVAL;

	switch (id) {
	case CAM_GENERIC_FENCE_CREATE:
		rc = cam_generic_fence_handle_sync_create(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_RELEASE:
		rc = cam_generic_fence_handle_sync_release(fence_cmd_args);
		break;
	default:
		CAM_ERR(CAM_SYNC, "IOCTL cmd: %u not supported for sync object", id);
		break;
	}

	return rc;
}

static int cam_generic_fence_parser(
	struct cam_private_ioctl_arg *k_ioctl)
{
	int rc;
	struct cam_generic_fence_cmd_args fence_cmd_args;

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid args input ptr: %p",
			k_ioctl->ioctl_ptr);
		return -EINVAL;
	}

	if (k_ioctl->size != sizeof(struct cam_generic_fence_cmd_args)) {
		CAM_ERR(CAM_SYNC, "Size mismatch expected: 0x%llx actual: 0x%llx",
			sizeof(struct cam_generic_fence_cmd_args), k_ioctl->size);
		return -EINVAL;
	}

	if (copy_from_user(&fence_cmd_args, u64_to_user_ptr(k_ioctl->ioctl_ptr),
		sizeof(fence_cmd_args))) {
		CAM_ERR(CAM_SYNC, "copy from user failed for input ptr: %pK",
			k_ioctl->ioctl_ptr);
		return -EFAULT;
	}

	if (fence_cmd_args.input_handle_type != CAM_HANDLE_USER_POINTER) {
		CAM_ERR(CAM_SYNC, "Invalid handle type: %u",
			fence_cmd_args.input_handle_type);
		return -EINVAL;
	}

	switch (fence_cmd_args.fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		rc = cam_generic_fence_process_sync_obj_cmd(k_ioctl->id, &fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		rc = cam_generic_fence_process_dma_fence_cmd(k_ioctl->id, &fence_cmd_args);
		break;
	default:
		rc = -EINVAL;
		CAM_ERR(CAM_SYNC, "fence type: 0x%x handling not supported",
			fence_cmd_args.fence_type);
		break;
	}

	return rc;
}

static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	case CAM_GENERIC_FENCE_CREATE:
	case CAM_GENERIC_FENCE_RELEASE:
	case CAM_GENERIC_FENCE_IMPORT:
	case CAM_GENERIC_FENCE_SIGNAL:
		rc = cam_generic_fence_parser(&k_ioctl);
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
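/*
 * Only one userspace client may hold the sync device open at a time;
 * additional opens fail with -EALREADY. A successful open also primes
 * the dma fence table and publishes the event queue handle.
 */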
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		cam_dma_fence_open();
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed: %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
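/*
 * On the last close, signal every remaining ACTIVE object as error,
 * drain the signal work queue, then destroy all leftover objects before
 * releasing the v4l2 file handle.
 */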
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from
			 * logging it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d", i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks
		 * to finish.
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callback worker threads have finished,
		 * destroy the sync objects.
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail idx:%d", i);
			}
		}
	}

	/* Clean dma fence table */
	cam_dma_fence_close();
	mutex_unlock(&sync_dev->table_lock);

	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
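/*
 * v4l2 event "merge" callback, invoked by the v4l2 core when the
 * subscriber's event queue is full and the oldest event is merged into
 * the next one; this handler just logs the header of the event being
 * lost so the drop does not go unnoticed.
 */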
static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
	struct v4l2_event *new)
{
	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d reason %u %u %u %u",
			old->id, ev_header->sync_obj, ev_header->status,
			ev_header->evt_param[0], ev_header->evt_param[1],
			ev_header->evt_param[2], ev_header->evt_param[3]);
	} else {
		struct cam_sync_ev_header *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d",
			old->id, ev_header->sync_obj, ev_header->status);
	}
}

static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
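/*
 * Only the V1/V2 sync event types may be subscribed to; the subscribed
 * type is recorded in sync_dev->version and selects the event header
 * layout (see cam_sync_event_queue_notify_error above).
 */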
int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Unsupported event type 0x%x", sub->type);
		return -EINVAL;
	}

	sync_dev->version = sub->type;
	CAM_DBG(CAM_SYNC, "Sync event version type 0x%x", sync_dev->version);
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Unsupported event type 0x%x", sub->type);
		return -EINVAL;
	}

	return v4l2_event_unsubscribe(fh, sub);
}
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

#if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
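/*
 * Register a media device and entity for the sync video node when the
 * media controller framework is reachable; the #else stubs below turn
 * these helpers into no-ops otherwise.
 */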
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	/* Free mdev here; the caller does not clean it up on failure */
	kfree(sync_dev->v4l2_dev.mdev);
	sync_dev->v4l2_dev.mdev = NULL;
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
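/*
 * Expose the trigger_cb_without_switch knob under the "sync" debugfs
 * directory. Lack of debugfs support is not treated as a failure.
 */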
static int cam_sync_create_debugfs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("sync", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_SYNC, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}

	/* Store parent inode for cleanup in caller */
	sync_dev->dentry = dbgfileptr;

	debugfs_create_bool("trigger_cb_without_switch", 0644,
		sync_dev->dentry, &trigger_cb_without_switch);

end:
	return rc;
}
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
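/*
 * Signal hook registered with the synx driver: map the synx fence state
 * onto the corresponding cam_sync state (unknown states degrade to
 * SIGNALED_ERROR) and signal the sync object.
 */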
int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
{
	int rc = 0;
	uint32_t sync_status = synx_status;

	switch (synx_status) {
	case SYNX_STATE_ACTIVE:
		sync_status = CAM_SYNC_STATE_ACTIVE;
		break;
	case SYNX_STATE_SIGNALED_SUCCESS:
		sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		break;
	case SYNX_STATE_SIGNALED_ERROR:
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
		sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
		break;
	default:
		CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
			synx_status, sync_obj);
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	}

	rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
			rc, sync_obj, synx_status, sync_status);
	}

	return rc;
}

static int cam_sync_register_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc = 0;

	rc = synx_register_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);

	return rc;
}

static void cam_sync_unregister_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc = 0;

	rc = synx_deregister_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx deregistration fail with rc=%d", rc);
}

static void cam_sync_configure_synx_obj(struct synx_register_params *object)
{
	struct synx_register_params *params = object;

	params->name = CAM_SYNC_NAME;
	params->type = SYNX_TYPE_CSL;
	params->ops.register_callback = cam_sync_register_callback;
	params->ops.deregister_callback = cam_sync_deregister_callback;
	params->ops.enable_signaling = cam_sync_get_obj_ref;
	params->ops.signal = cam_synx_sync_signal;
}
#endif
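/*
 * Component bind: allocate the global sync device, register the v4l2
 * video node, and bring up the work queue, dma fence driver, debugfs
 * and (optionally) synx bind ops. Failure paths unwind in reverse
 * order of initialization.
 */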
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;

	rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as an invalid handle, so we will keep the 0th bit
	 * set always.
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	/* Initialize dma fence driver */
	rc = cam_dma_fence_driver_init();
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"DMA fence driver initialization failed rc: %d", rc);
		goto workq_destroy;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();

#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	if (rc)
		goto dma_driver_deinit;
#endif

	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
dma_driver_deinit:
	cam_dma_fence_driver_deinit();
#endif
workq_destroy:
	destroy_workqueue(sync_dev->work_queue);
v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
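/* Tear down in roughly the reverse order of cam_sync_component_bind(). */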
static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	sync_dev->dentry = NULL;

	cam_dma_fence_driver_deinit();

	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	kfree(sync_dev);
	sync_dev = NULL;
}

static const struct component_ops cam_sync_component_ops = {
	.bind = cam_sync_component_bind,
	.unbind = cam_sync_component_unbind,
};
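/*
 * Probe/remove only register the component with the aggregate driver;
 * the real setup and teardown happen in the bind/unbind callbacks.
 */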
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc = 0;

	CAM_DBG(CAM_SYNC, "Adding Sync component");
	rc = component_add(&pdev->dev, &cam_sync_component_ops);
	if (rc)
		CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);

	return rc;
}

static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}

static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};

MODULE_DEVICE_TABLE(of, cam_sync_dt_match);

struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};

int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");