cam_sync.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
#include <synx_api.h>
#endif
#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"
#include "cam_compat.h"
#include "camera_main.h"
#include "cam_req_mgr_workq.h"

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;

unsigned long cam_sync_monitor_mask;
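
/*
 * Dump every row of the sync table, taking each row spinlock in turn.
 * Used below as a debug aid when sync object allocation runs out of
 * free rows, to help spot leaked objects.
 */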
static void cam_sync_print_fence_table(void)
{
    int idx;

    for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
        spin_lock_bh(&sync_dev->row_spinlocks[idx]);
        CAM_INFO(CAM_SYNC,
            "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
            idx,
            sync_dev->sync_table[idx].sync_id,
            sync_dev->sync_table[idx].name,
            sync_dev->sync_table[idx].type,
            sync_dev->sync_table[idx].state,
            atomic_read(&sync_dev->sync_table[idx].ref_cnt));
        spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
    }
}
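
/*
 * Allocate a free row in the sync table and initialize it as an individual
 * sync object. find_first_zero_bit() is not atomic with respect to other
 * allocators, so the claim is re-validated with test_and_set_bit() and the
 * search is retried on a lost race. Optional dma fence / synx associations
 * are recorded while the row spinlock is held.
 */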
static int cam_sync_create_util(
    int32_t *sync_obj, const char *name,
    struct cam_dma_fence_create_sync_obj_payload *dma_sync_create_info,
    struct sync_synx_obj_info *synx_obj_sync_create_info)
{
    int rc;
    long idx;
    bool bit;
    struct sync_table_row *row = NULL;

    do {
        idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
        if (idx >= CAM_SYNC_MAX_OBJS) {
            CAM_ERR(CAM_SYNC,
                "Error: Unable to create sync idx = %d sync name = %s reached max!",
                idx, name);
            cam_sync_print_fence_table();
            return -ENOMEM;
        }
        CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
        bit = test_and_set_bit(idx, sync_dev->bitmap);
    } while (bit);

    spin_lock_bh(&sync_dev->row_spinlocks[idx]);
    rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
        CAM_SYNC_TYPE_INDV);
    if (rc) {
        CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
            idx);
        clear_bit(idx, sync_dev->bitmap);
        spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
        return -EINVAL;
    }
    *sync_obj = idx;

    /* Associate sync obj with synx if any holding sync lock */
    if (synx_obj_sync_create_info) {
        row = sync_dev->sync_table + idx;
        row->synx_obj_info.synx_obj_row_idx =
            synx_obj_sync_create_info->synx_obj_row_idx;
        row->synx_obj_info.sync_created_with_synx =
            synx_obj_sync_create_info->sync_created_with_synx;
        row->synx_obj_info.synx_obj = synx_obj_sync_create_info->synx_obj;
        set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask);
        CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with synx_obj: %d",
            name, *sync_obj, row->synx_obj_info.synx_obj);
    }

    /* Associate sync obj with dma fence if any holding sync lock */
    if (dma_sync_create_info) {
        row = sync_dev->sync_table + idx;
        row->dma_fence_info.dma_fence_fd = dma_sync_create_info->fd;
        row->dma_fence_info.dma_fence_row_idx = dma_sync_create_info->dma_fence_row_idx;
        row->dma_fence_info.sync_created_with_dma =
            dma_sync_create_info->sync_created_with_dma;
        set_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask);

        /* Association refcnt for non-import cases */
        if (dma_sync_create_info->sync_created_with_dma) {
            rc = cam_dma_fence_get_put_ref(true, row->dma_fence_info.dma_fence_row_idx);
            if (rc) {
                CAM_ERR(CAM_SYNC,
                    "Failed to getref on dma fence idx: %u fd: %d sync_obj: %d rc: %d",
                    row->dma_fence_info.dma_fence_row_idx,
                    row->dma_fence_info.dma_fence_fd,
                    *sync_obj, rc);
                goto end;
            }
        }
        CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with dma fence fd: %d",
            name, *sync_obj, dma_sync_create_info->fd);
        goto end;
    }

    CAM_DBG(CAM_SYNC, "sync_obj: %s[%i]", name, *sync_obj);

end:
    spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
    return rc;
}

int cam_sync_create(int32_t *sync_obj, const char *name)
{
    return cam_sync_create_util(sync_obj, name, NULL, NULL);
}
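
/*
 * Register a kernel callback on a sync object. If the object is already
 * signaled with no pending children, the callback is either invoked inline
 * (trigger_cb_without_switch) or dispatched once on the sync workqueue;
 * otherwise it is queued on the row's callback_list for dispatch at signal
 * time.
 */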
int cam_sync_register_callback(sync_callback cb_func,
    void *userdata, int32_t sync_obj)
{
    struct sync_callback_info *sync_cb;
    struct sync_table_row *row = NULL;
    int status = 0;
    int rc = 0;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0) || (!cb_func))
        return -EINVAL;

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }

    sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
    if (!sync_cb) {
        rc = -ENOMEM;
        goto monitor_dump;
    }

    /* Trigger callback if sync object is already in SIGNALED state */
    if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
        (row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
        (row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
        (!row->remaining)) {
        if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
                &cam_sync_monitor_mask))
            cam_generic_fence_update_monitor_array(sync_obj,
                &sync_dev->table_lock, sync_dev->mon_data,
                CAM_FENCE_OP_SKIP_REGISTER_CB);
        if (trigger_cb_without_switch) {
            CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
                row->name,
                sync_obj);
            status = row->state;
            kfree(sync_cb);
            spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
            cb_func(sync_obj, status, userdata);
        } else {
            sync_cb->callback_func = cb_func;
            sync_cb->cb_data = userdata;
            sync_cb->sync_obj = sync_obj;
            INIT_WORK(&sync_cb->cb_dispatch_work,
                cam_sync_util_cb_dispatch);
            sync_cb->status = row->state;
            CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]",
                row->name,
                sync_cb->sync_obj);
            sync_cb->workq_scheduled_ts = ktime_get();
            queue_work(sync_dev->work_queue,
                &sync_cb->cb_dispatch_work);
            spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
        }
        return 0;
    }

    sync_cb->callback_func = cb_func;
    sync_cb->cb_data = userdata;
    sync_cb->sync_obj = sync_obj;
    INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
    list_add_tail(&sync_cb->list, &row->callback_list);
    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
        cam_generic_fence_update_monitor_array(sync_obj,
            &sync_dev->table_lock, sync_dev->mon_data,
            CAM_FENCE_OP_REGISTER_CB);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

    return 0;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
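
/*
 * Remove a previously registered callback. Every entry matching both the
 * callback function and its userdata is unlinked and freed; returns -ENOENT
 * if no matching registration was found.
 */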
int cam_sync_deregister_callback(sync_callback cb_func,
    void *userdata, int32_t sync_obj)
{
    struct sync_table_row *row = NULL;
    struct sync_callback_info *sync_cb, *temp;
    bool found = false;
    int rc = 0;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }

    CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
        row->name,
        sync_obj);
    list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
        if ((sync_cb->callback_func == cb_func) &&
            (sync_cb->cb_data == userdata)) {
            list_del_init(&sync_cb->list);
            kfree(sync_cb);
            found = true;
        }
    }

    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
        if (found) {
            cam_generic_fence_update_monitor_array(sync_obj,
                &sync_dev->table_lock, sync_dev->mon_data,
                CAM_FENCE_OP_UNREGISTER_CB);
        } else {
            CAM_ERR(CAM_SYNC,
                "Error: Callback not found sync obj = %s[%d] : sync_id %d, state %d",
                row->name, sync_obj, row->sync_id, row->state);
            cam_sync_dump_monitor_array(row);
        }
    }
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return found ? 0 : -ENOENT;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
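
/*
 * Translate a sync object signal state into the status code carried by the
 * associated dma fence: 0 for success, -EADV for error, -ECANCELED for
 * cancellation. Any other state is rejected.
 */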
static inline int cam_sync_signal_dma_fence_util(
    struct sync_table_row *row, uint32_t status)
{
    struct cam_dma_fence_signal signal_dma_fence;

    signal_dma_fence.dma_fence_fd = row->dma_fence_info.dma_fence_fd;

    switch (status) {
    case CAM_SYNC_STATE_SIGNALED_SUCCESS:
        signal_dma_fence.status = 0;
        break;
    case CAM_SYNC_STATE_SIGNALED_ERROR:
        /* Advertise error */
        signal_dma_fence.status = -EADV;
        break;
    case CAM_SYNC_STATE_SIGNALED_CANCEL:
        signal_dma_fence.status = -ECANCELED;
        break;
    default:
        CAM_ERR(CAM_SYNC,
            "Signaling undefined status: %d for sync obj: %d",
            status, row->sync_id);
        return -EINVAL;
    }

    return cam_dma_fence_internal_signal(row->dma_fence_info.dma_fence_row_idx,
        &signal_dma_fence);
}
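
/*
 * Propagate a child's signal to all of its merged (GROUP) parents. Each
 * parent's remaining count is decremented under its own row lock, and its
 * registered callbacks are dispatched once the count reaches zero.
 */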
static void cam_sync_signal_parent_util(int32_t status,
    uint32_t event_cause, struct list_head *parents_list)
{
    int rc;
    struct sync_table_row *parent_row = NULL;
    struct sync_parent_info *parent_info, *temp_parent_info;

    /*
     * Now iterate over all parents of this object and if they too need to
     * be signaled dispatch cb's
     */
    list_for_each_entry_safe(parent_info, temp_parent_info,
        parents_list, list) {
        parent_row = sync_dev->sync_table + parent_info->sync_id;
        spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
        parent_row->remaining--;

        rc = cam_sync_util_update_parent_state(
            parent_row,
            status);
        if (rc) {
            CAM_ERR(CAM_SYNC, "Invalid parent state %d",
                parent_row->state);
            spin_unlock_bh(
                &sync_dev->row_spinlocks[parent_info->sync_id]);
            kfree(parent_info);
            continue;
        }

        if (!parent_row->remaining)
            cam_sync_util_dispatch_signaled_cb(
                parent_info->sync_id, parent_row->state,
                event_cause);
        if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
                &cam_sync_monitor_mask))
            cam_generic_fence_update_monitor_array(parent_info->sync_id,
                &sync_dev->table_lock, sync_dev->mon_data,
                CAM_FENCE_OP_SIGNAL);
        spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
        list_del_init(&parent_info->list);
        kfree(parent_info);
    }
}
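
/*
 * Validate that a sync object may be signaled: it must be an initialized,
 * ACTIVE, individual (non-GROUP) object, and the requested status must be
 * one of the defined SIGNALED states.
 */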
static int cam_sync_signal_validate_util(
    int32_t sync_obj, int32_t status)
{
    struct sync_table_row *row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name, sync_obj);
        return -EINVAL;
    }

    if (row->type == CAM_SYNC_TYPE_GROUP) {
        CAM_ERR(CAM_SYNC,
            "Error: Signaling a GROUP sync object = %s[%d]",
            row->name, sync_obj);
        return -EINVAL;
    }

    if (row->state != CAM_SYNC_STATE_ACTIVE) {
        CAM_ERR(CAM_SYNC,
            "Error: Sync object already signaled sync_obj = %s[%d]",
            row->name, sync_obj);
        return -EALREADY;
    }

    if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
        (status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
        (status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
        CAM_ERR(CAM_SYNC,
            "Error: signaling with undefined status = %d", status);
        return -EINVAL;
    }

    return 0;
}
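
/*
 * Signal a sync object. Ordering under the row lock: the associated dma
 * fence is signaled first so external waiters can make progress, callbacks
 * are dispatched, and the parents list is spliced onto a local list. The
 * synx object (if any) is signaled after the lock is dropped, and parents
 * are then walked without this row's lock held.
 */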
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
    struct sync_table_row *row = NULL;
    struct list_head parents_list;
    int rc = 0, synx_row_idx = 0;
    uint32_t synx_obj = 0;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
    struct cam_synx_obj_signal signal_synx_obj;
#endif

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) {
        CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
            sync_obj, CAM_SYNC_MAX_OBJS);
        return -EINVAL;
    }

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;
    rc = cam_sync_signal_validate_util(sync_obj, status);
    if (rc) {
        CAM_ERR(CAM_SYNC,
            "Error: Failed to validate signal info for sync_obj = %s[%d] with status = %d rc = %d",
            row->name, sync_obj, status, rc);
        goto monitor_dump;
    }

    if (!atomic_dec_and_test(&row->ref_cnt)) {
        spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
        return 0;
    }

    row->state = status;

    /*
     * Signal associated dma fence first - external entities
     * waiting on this fence can start processing
     */
    if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
        rc = cam_sync_signal_dma_fence_util(row, status);
        if (rc)
            CAM_ERR(CAM_SYNC,
                "Error: Failed to signal associated dma fence fd = %d for sync_obj = %s[%d]",
                row->dma_fence_info.dma_fence_fd, row->name, sync_obj);
    }

    /* Obtain associated synx hdl if any with the row lock held */
    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
        synx_obj = row->synx_obj_info.synx_obj;
        synx_row_idx = row->synx_obj_info.synx_obj_row_idx;
    }

    cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

    /* copy parent list to local and release child lock */
    INIT_LIST_HEAD(&parents_list);
    list_splice_init(&row->parents_list, &parents_list);

    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
        cam_generic_fence_update_monitor_array(sync_obj,
            &sync_dev->table_lock, sync_dev->mon_data,
            CAM_FENCE_OP_SIGNAL);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
    /*
     * Signal associated synx obj after unlock
     */
    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
        signal_synx_obj.status = status;
        signal_synx_obj.synx_obj = synx_obj;
        rc = cam_synx_obj_internal_signal(synx_row_idx, &signal_synx_obj);
        if (rc)
            CAM_ERR(CAM_SYNC,
                "Error: Failed to signal associated synx obj = %d for sync_obj = %d",
                synx_obj, sync_obj);
    }
#endif

    if (list_empty(&parents_list))
        return 0;

    cam_sync_signal_parent_util(status, event_cause, &parents_list);
    return 0;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
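
/*
 * Merge a list of sync objects into a new GROUP object. Duplicate entries
 * are rejected, every input object is validated, and a free row is claimed
 * with the same bitmap retry loop used for individual objects.
 */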
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
    int rc;
    long idx = 0;
    bool bit;
    int i = 0;

    if ((!sync_obj) || (!merged_obj)) {
        CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
        return -EINVAL;
    }

    if (num_objs <= 1) {
        CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
        return -EINVAL;
    }

    if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
        != num_objs) {
        CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
        return -EINVAL;
    }

    for (i = 0; i < num_objs; i++) {
        rc = cam_sync_check_valid(sync_obj[i]);
        if (rc) {
            CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
                i, sync_obj[i]);
            return rc;
        }
    }

    do {
        idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
        if (idx >= CAM_SYNC_MAX_OBJS)
            return -ENOMEM;
        bit = test_and_set_bit(idx, sync_dev->bitmap);
    } while (bit);

    spin_lock_bh(&sync_dev->row_spinlocks[idx]);
    rc = cam_sync_init_group_object(sync_dev->sync_table,
        idx, sync_obj,
        num_objs);
    if (rc < 0) {
        CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
            idx);
        clear_bit(idx, sync_dev->bitmap);
        spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
        return -EINVAL;
    }
    CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
    *merged_obj = idx;
    spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

    return 0;
}
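
/*
 * Reference counting helpers: cam_sync_get_obj_ref() takes the row lock and
 * only increments the count while the object is still ACTIVE, whereas
 * cam_sync_put_obj_ref() drops the count without validation.
 */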
int cam_sync_get_obj_ref(int32_t sync_obj)
{
    struct sync_table_row *row = NULL;
    int rc;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    spin_lock(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (row->state != CAM_SYNC_STATE_ACTIVE) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }

    atomic_inc(&row->ref_cnt);
    spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
    CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

    return 0;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}

int cam_sync_put_obj_ref(int32_t sync_obj)
{
    struct sync_table_row *row = NULL;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    row = sync_dev->sync_table + sync_obj;
    atomic_dec(&row->ref_cnt);
    CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

    return 0;
}
int cam_sync_destroy(int32_t sync_obj)
{
    return cam_sync_deinit_object(sync_dev->sync_table, sync_obj, NULL, NULL);
}

int cam_sync_check_valid(int32_t sync_obj)
{
    struct sync_table_row *row = NULL;
    int rc;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (!test_bit(sync_obj, sync_dev->bitmap)) {
        CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

    return 0;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
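
/*
 * Block on a sync object's completion with a millisecond timeout. Only a
 * SIGNALED_SUCCESS state yields 0; a timeout returns -ETIMEDOUT, and every
 * other terminal state is reported as -EINVAL.
 */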
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
    unsigned long timeleft;
    int rc;
    struct sync_table_row *row = NULL;

    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        rc = -EINVAL;
        goto monitor_dump;
    }

    timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
        msecs_to_jiffies(timeout_ms));
    if (!timeleft) {
        CAM_ERR(CAM_SYNC,
            "Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
        rc = -ETIMEDOUT;
        goto monitor_dump;
    } else {
        switch (row->state) {
        case CAM_SYNC_STATE_INVALID:
        case CAM_SYNC_STATE_ACTIVE:
        case CAM_SYNC_STATE_SIGNALED_ERROR:
        case CAM_SYNC_STATE_SIGNALED_CANCEL:
            CAM_ERR(CAM_SYNC,
                "Error: Wait on invalid state = %d, obj = %d, name = %s",
                row->state, sync_obj, row->name);
            rc = -EINVAL;
            goto monitor_dump;
        case CAM_SYNC_STATE_SIGNALED_SUCCESS:
            rc = 0;
            break;
        default:
            rc = -EINVAL;
            goto monitor_dump;
        }
    }

    return rc;

monitor_dump:
    cam_sync_dump_monitor_array(row);
    return rc;
}
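
/*
 * Private ioctl handlers. Each handler validates the embedded payload size
 * and user pointer before copying the request in, then forwards to the
 * corresponding kernel API above.
 */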
static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_info sync_create;
    int result;

    if (k_ioctl->size != sizeof(struct cam_sync_info))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&sync_create,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;
    sync_create.name[SYNC_DEBUG_NAME_LEN] = '\0';

    result = cam_sync_create(&sync_create.sync_obj,
        sync_create.name);
    if (!result)
        if (copy_to_user(
            u64_to_user_ptr(k_ioctl->ioctl_ptr),
            &sync_create,
            k_ioctl->size))
            return -EFAULT;

    return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
    int rc;
    struct cam_sync_signal sync_signal;

    if (k_ioctl->size != sizeof(struct cam_sync_signal))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&sync_signal,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    /* need to get ref for UMD signaled fences */
    rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
    if (rc) {
        CAM_DBG(CAM_SYNC,
            "Error: cannot signal an uninitialized sync obj = %d",
            sync_signal.sync_obj);
        return rc;
    }

    return cam_sync_signal(sync_signal.sync_obj,
        sync_signal.sync_state,
        CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_merge sync_merge;
    uint32_t *sync_objs;
    uint32_t num_objs;
    uint32_t size;
    int result;

    if (k_ioctl->size != sizeof(struct cam_sync_merge))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&sync_merge,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
        return -EINVAL;

    size = sizeof(uint32_t) * sync_merge.num_objs;
    sync_objs = kzalloc(size, GFP_ATOMIC);
    if (!sync_objs)
        return -ENOMEM;

    if (copy_from_user(sync_objs,
        u64_to_user_ptr(sync_merge.sync_objs),
        sizeof(uint32_t) * sync_merge.num_objs)) {
        kfree(sync_objs);
        return -EFAULT;
    }

    num_objs = sync_merge.num_objs;

    result = cam_sync_merge(sync_objs,
        num_objs,
        &sync_merge.merged);
    if (!result)
        if (copy_to_user(
            u64_to_user_ptr(k_ioctl->ioctl_ptr),
            &sync_merge,
            k_ioctl->size)) {
            kfree(sync_objs);
            return -EFAULT;
        }

    kfree(sync_objs);
    return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_wait sync_wait;

    if (k_ioctl->size != sizeof(struct cam_sync_wait))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&sync_wait,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
        sync_wait.timeout_ms);

    return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_info sync_create;

    if (k_ioctl->size != sizeof(struct cam_sync_info))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&sync_create,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    return cam_sync_destroy(sync_create.sync_obj);
}

static int cam_sync_handle_register_user_payload(
    struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_userpayload_info userpayload_info;
    struct sync_user_payload *user_payload_kernel;
    struct sync_user_payload *user_payload_iter;
    struct sync_user_payload *temp_upayload_kernel;
    uint32_t sync_obj;
    struct sync_table_row *row = NULL;

    if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
        return -EINVAL;

    if (!k_ioctl->ioctl_ptr)
        return -EINVAL;

    if (copy_from_user(&userpayload_info,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    sync_obj = userpayload_info.sync_obj;
    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
    if (!user_payload_kernel)
        return -ENOMEM;

    memcpy(user_payload_kernel->payload_data,
        userpayload_info.payload,
        CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
        kfree(user_payload_kernel);
        return -EINVAL;
    }

    if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
        (row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
        (row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
        if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
                &cam_sync_monitor_mask))
            cam_generic_fence_update_monitor_array(sync_obj,
                &sync_dev->table_lock, sync_dev->mon_data,
                CAM_FENCE_OP_SKIP_REGISTER_CB);
        cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
            sync_obj,
            row->state,
            user_payload_kernel->payload_data,
            CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
            CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);

        spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
        kfree(user_payload_kernel);
        return 0;
    }

    list_for_each_entry_safe(user_payload_iter,
        temp_upayload_kernel,
        &row->user_payload_list,
        list) {
        if (user_payload_iter->payload_data[0] ==
                user_payload_kernel->payload_data[0] &&
            user_payload_iter->payload_data[1] ==
                user_payload_kernel->payload_data[1]) {
            if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
                    &cam_sync_monitor_mask))
                cam_generic_fence_update_monitor_array(sync_obj,
                    &sync_dev->table_lock, sync_dev->mon_data,
                    CAM_FENCE_OP_ALREADY_REGISTERED_CB);
            spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
            kfree(user_payload_kernel);
            return -EALREADY;
        }
    }

    list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
    if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
        cam_generic_fence_update_monitor_array(sync_obj,
            &sync_dev->table_lock, sync_dev->mon_data,
            CAM_FENCE_OP_REGISTER_CB);
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return 0;
}

static int cam_sync_handle_deregister_user_payload(
    struct cam_private_ioctl_arg *k_ioctl)
{
    struct cam_sync_userpayload_info userpayload_info;
    struct sync_user_payload *user_payload_kernel, *temp;
    uint32_t sync_obj;
    struct sync_table_row *row = NULL;

    if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
        CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
        return -EINVAL;
    }

    if (!k_ioctl->ioctl_ptr) {
        CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
        return -EINVAL;
    }

    if (copy_from_user(&userpayload_info,
        u64_to_user_ptr(k_ioctl->ioctl_ptr),
        k_ioctl->size))
        return -EFAULT;

    sync_obj = userpayload_info.sync_obj;
    if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
        return -EINVAL;

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    if (row->state == CAM_SYNC_STATE_INVALID) {
        CAM_ERR(CAM_SYNC,
            "Error: accessing an uninitialized sync obj = %s[%d]",
            row->name,
            sync_obj);
        spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
        return -EINVAL;
    }

    list_for_each_entry_safe(user_payload_kernel, temp,
        &row->user_payload_list, list) {
        if (user_payload_kernel->payload_data[0] ==
                userpayload_info.payload[0] &&
            user_payload_kernel->payload_data[1] ==
                userpayload_info.payload[1]) {
            list_del_init(&user_payload_kernel->list);
            kfree(user_payload_kernel);
            if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
                    &cam_sync_monitor_mask))
                cam_generic_fence_update_monitor_array(sync_obj,
                    &sync_dev->table_lock, sync_dev->mon_data,
                    CAM_FENCE_OP_UNREGISTER_CB);
        }
    }

    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return 0;
}
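
/*
 * Callback invoked by the cam dma fence layer when an associated dma fence
 * is signaled externally. Validates the fd against the row's association,
 * maps a negative fence status onto the matching sync state, and then
 * follows the same dispatch path as cam_sync_signal().
 */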
static int cam_sync_dma_fence_cb(
    int32_t sync_obj,
    struct cam_dma_fence_signal_sync_obj *signal_sync_obj)
{
    int32_t rc;
    int32_t status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
    struct sync_table_row *row = NULL;
    struct list_head parents_list;

    if (!signal_sync_obj) {
        CAM_ERR(CAM_SYNC, "Invalid signal info args");
        return -EINVAL;
    }

    /* Validate sync object range */
    if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
        CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
        return -EINVAL;
    }

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    /* Validate if sync obj has a dma fence association */
    if (!test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
        CAM_ERR(CAM_SYNC,
            "sync obj = %d[%s] has no associated dma fence ext_fence_mask = 0x%x",
            sync_obj, row->name, row->ext_fence_mask);
        rc = -EINVAL;
        goto end;
    }

    /* Validate if we are signaling the right sync obj based on dma fence fd */
    if (row->dma_fence_info.dma_fence_fd != signal_sync_obj->fd) {
        CAM_ERR(CAM_SYNC,
            "sync obj: %d[%s] is associated with a different fd: %d, signaling for fd: %d",
            sync_obj, row->name, row->dma_fence_info.dma_fence_fd, signal_sync_obj->fd);
        rc = -EINVAL;
        goto end;
    }

    /* Check for error status */
    if (signal_sync_obj->status < 0) {
        if (signal_sync_obj->status == -ECANCELED)
            status = CAM_SYNC_STATE_SIGNALED_CANCEL;
        else
            status = CAM_SYNC_STATE_SIGNALED_ERROR;
    }

    rc = cam_sync_signal_validate_util(sync_obj, status);
    if (rc) {
        CAM_ERR(CAM_SYNC,
            "Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d",
            sync_obj, row->name, status, rc);
        goto end;
    }

    /* Adding dma fence reference on sync */
    atomic_inc(&row->ref_cnt);

    if (!atomic_dec_and_test(&row->ref_cnt))
        goto end;

    row->state = status;

    cam_sync_util_dispatch_signaled_cb(sync_obj, status, 0);

    INIT_LIST_HEAD(&parents_list);
    list_splice_init(&row->parents_list, &parents_list);

    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

    if (list_empty(&parents_list))
        return 0;

    cam_sync_signal_parent_util(status, 0x0, &parents_list);
    return 0;

end:
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
static int cam_sync_synx_obj_cb(int32_t sync_obj,
    struct cam_synx_obj_signal_sync_obj *signal_sync_obj)
{
    int32_t rc;
    struct sync_table_row *row = NULL;
    struct list_head parents_list;

    if (!signal_sync_obj) {
        CAM_ERR(CAM_SYNC, "Invalid signal info args");
        return -EINVAL;
    }

    /* Validate sync object range */
    if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
        CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
        return -EINVAL;
    }

    spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
    row = sync_dev->sync_table + sync_obj;

    /* Validate if sync obj has a synx obj association */
    if (!test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
        CAM_ERR(CAM_SYNC,
            "sync obj = %d[%s] has no associated synx obj ext_fence_mask = 0x%x",
            sync_obj, row->name, row->ext_fence_mask);
        rc = -EINVAL;
        goto end;
    }

    /* Validate if we are signaling the right sync obj based on synx handle */
    if (row->synx_obj_info.synx_obj != signal_sync_obj->synx_obj) {
        CAM_ERR(CAM_SYNC,
            "sync obj: %d[%s] is associated with a different synx obj: %d, signaling for synx obj: %d",
            sync_obj, row->name, row->synx_obj_info.synx_obj,
            signal_sync_obj->synx_obj);
        rc = -EINVAL;
        goto end;
    }

    rc = cam_sync_signal_validate_util(sync_obj, signal_sync_obj->status);
    if (rc) {
        CAM_ERR(CAM_SYNC,
            "Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d",
            sync_obj, row->name, signal_sync_obj->status, rc);
        goto end;
    }

    /* Adding synx reference on sync */
    atomic_inc(&row->ref_cnt);
    if (!atomic_dec_and_test(&row->ref_cnt)) {
        CAM_DBG(CAM_SYNC, "Sync = %d[%s] fence still has references, synx_hdl = %d",
            sync_obj, row->name, signal_sync_obj->synx_obj);
        goto end;
    }

    row->state = signal_sync_obj->status;

    cam_sync_util_dispatch_signaled_cb(sync_obj, signal_sync_obj->status, 0);

    INIT_LIST_HEAD(&parents_list);
    list_splice_init(&row->parents_list, &parents_list);

    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

    if (list_empty(&parents_list))
        return 0;

    cam_sync_signal_parent_util(signal_sync_obj->status, 0x0, &parents_list);
    CAM_DBG(CAM_SYNC,
        "Successfully signaled sync obj = %d with status = %d via synx obj = %d signal callback",
        sync_obj, signal_sync_obj->status, signal_sync_obj->synx_obj);

    return 0;

end:
    spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
    return rc;
}
#endif
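
/*
 * Copy and validate a batched generic fence request from user space. The
 * expected size accounts for the one cam_generic_fence_config embedded in
 * cam_generic_fence_input_info plus (num_fences_requested - 1) trailing
 * entries, so a mismatch indicates a malformed request.
 */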
static int cam_generic_fence_alloc_validate_input_info_util(
    struct cam_generic_fence_cmd_args *fence_cmd_args,
    struct cam_generic_fence_input_info **fence_input_info)
{
    int rc = 0;
    struct cam_generic_fence_input_info *fence_input = NULL;
    uint32_t num_fences;
    size_t expected_size;

    *fence_input_info = NULL;

    if (fence_cmd_args->input_data_size !=
        sizeof(struct cam_generic_fence_input_info)) {
        CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx",
            sizeof(struct cam_generic_fence_input_info),
            fence_cmd_args->input_data_size);
        return -EINVAL;
    }

    fence_input = memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle),
        fence_cmd_args->input_data_size);
    if (IS_ERR_OR_NULL(fence_input)) {
        CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
            fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
        return -ENOMEM;
    }

    /* Validate num fences */
    num_fences = fence_input->num_fences_requested;
    if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) {
        CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching",
            num_fences);
        rc = -EINVAL;
        goto free_mem;
    }

    /* Validate sizes */
    expected_size = sizeof(struct cam_generic_fence_input_info) +
        ((num_fences - 1) * sizeof(struct cam_generic_fence_config));
    if ((uint32_t)expected_size != fence_cmd_args->input_data_size) {
        CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u",
            expected_size, fence_cmd_args->input_data_size, num_fences);
        rc = -EINVAL;
        goto free_mem;
    }

    *fence_input_info = fence_input;
    return rc;

free_mem:
    kfree(fence_input);
    return rc;
}
static void cam_generic_fence_free_input_info_util(
    struct cam_generic_fence_input_info **fence_input_info)
{
    struct cam_generic_fence_input_info *fence_input = *fence_input_info;

    kfree(fence_input);
    *fence_input_info = NULL;
}
static int cam_generic_fence_handle_dma_create(
    struct cam_generic_fence_cmd_args *fence_cmd_args)
{
    int rc, i, dma_fence_row_idx;
    struct cam_generic_fence_input_info *fence_input_info = NULL;
    struct cam_generic_fence_config *fence_cfg = NULL;

    rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
    if (rc || !fence_input_info) {
        CAM_ERR(CAM_DMA_FENCE,
            "Fence input info validation failed rc: %d fence_input_info: %pK",
            rc, fence_input_info);
        return -EINVAL;
    }

    for (i = 0; i < fence_input_info->num_fences_requested; i++) {
        fence_cfg = &fence_input_info->fence_cfg[i];
        fence_input_info->num_fences_processed++;
        fence_cfg->reason_code = 0;

        rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
            &dma_fence_row_idx, fence_cfg->name);
        if (rc) {
            CAM_ERR(CAM_DMA_FENCE,
                "Failed to create dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
                i, rc, fence_input_info->num_fences_requested,
                fence_input_info->num_fences_processed);
            fence_cfg->reason_code = rc;
            goto out_copy;
        }

        CAM_DBG(CAM_DMA_FENCE,
            "Created dma_fence @ i: %d fence fd: %d[%s] num fences [requested: %u processed: %u] ",
            i, fence_cfg->dma_fence_fd, fence_cfg->name,
            fence_input_info->num_fences_requested,
            fence_input_info->num_fences_processed);
    }

out_copy:
    if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
        fence_input_info, fence_cmd_args->input_data_size)) {
        CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
            fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
        rc = -EFAULT;
    }

    cam_generic_fence_free_input_info_util(&fence_input_info);
    return rc;
}
static int cam_generic_fence_handle_dma_release(
    struct cam_generic_fence_cmd_args *fence_cmd_args)
{
    int rc, i;
    bool failed = false;
    struct cam_dma_fence_release_params release_params;
    struct cam_generic_fence_input_info *fence_input_info = NULL;
    struct cam_generic_fence_config *fence_cfg = NULL;

    rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
    if (rc || !fence_input_info) {
        CAM_ERR(CAM_DMA_FENCE,
            "Fence input info validation failed rc: %d fence_input_info: %pK",
            rc, fence_input_info);
        return -EINVAL;
    }

    for (i = 0; i < fence_input_info->num_fences_requested; i++) {
        fence_cfg = &fence_input_info->fence_cfg[i];
        fence_input_info->num_fences_processed++;
        fence_cfg->reason_code = 0;

        release_params.use_row_idx = false;
        release_params.u.dma_fence_fd = fence_cfg->dma_fence_fd;

        rc = cam_dma_fence_release(&release_params);
        if (rc) {
            CAM_ERR(CAM_DMA_FENCE,
                "Failed to destroy dma fence at index: %d fd: %d rc: %d num fences [requested: %u processed: %u]",
                i, fence_cfg->dma_fence_fd, rc,
                fence_input_info->num_fences_requested,
                fence_input_info->num_fences_processed);
            fence_cfg->reason_code = rc;
            /* Continue to release other fences, but mark the call as failed */
            failed = true;
            continue;
        }

        CAM_DBG(CAM_DMA_FENCE,
            "Released dma_fence @ i: %d fd: %d num fences [requested: %u processed: %u]",
            i, fence_cfg->dma_fence_fd,
            fence_input_info->num_fences_requested,
            fence_input_info->num_fences_processed);
    }

    if (failed)
        rc = -ENOMSG;

    if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
        fence_input_info, fence_cmd_args->input_data_size)) {
        CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
            fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
        rc = -EFAULT;
    }

    cam_generic_fence_free_input_info_util(&fence_input_info);
    return rc;
}
static int cam_generic_fence_handle_dma_import(
    struct cam_generic_fence_cmd_args *fence_cmd_args)
{
    int32_t rc, i, dma_fence_row_idx;
    struct dma_fence *fence = NULL;
    struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
    struct cam_generic_fence_input_info *fence_input_info = NULL;
    struct cam_generic_fence_config *fence_cfg = NULL;

    rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
    if (rc || !fence_input_info) {
        CAM_ERR(CAM_DMA_FENCE,
            "Fence input info validation failed rc: %d fence_input_info: %pK",
            rc, fence_input_info);
        return -EINVAL;
    }

    for (i = 0; i < fence_input_info->num_fences_requested; i++) {
        fence_cfg = &fence_input_info->fence_cfg[i];
        fence_input_info->num_fences_processed++;
        fence_cfg->reason_code = 0;

        /* Check if fd is for a valid dma fence */
        fence = cam_dma_fence_get_fence_from_fd(fence_cfg->dma_fence_fd,
            &dma_fence_row_idx);
        if (IS_ERR_OR_NULL(fence)) {
            CAM_ERR(CAM_DMA_FENCE,
                "Invalid dma fence for fd: %d", fence_cfg->dma_fence_fd);
            fence_cfg->reason_code = -EINVAL;
            goto out_copy;
        }

        dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
        dma_sync_create.fd = fence_cfg->dma_fence_fd;
        dma_sync_create.sync_created_with_dma = false;

        /* Create new sync object and associate dma fence */
        rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
            &dma_sync_create, NULL);
        if (rc) {
            fence_cfg->reason_code = rc;
            /* put on the import refcnt */
            cam_dma_fence_get_put_ref(false, dma_fence_row_idx);
            goto out_copy;
        }

        /* Register a cb for dma fence */
        rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
            &dma_fence_row_idx, cam_sync_dma_fence_cb);
        if (rc) {
            CAM_ERR(CAM_DMA_FENCE,
                "Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
                fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);
            cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
                NULL, NULL);
            fence_cfg->reason_code = rc;
            goto out_copy;
        }

        CAM_DBG(CAM_DMA_FENCE,
            "dma fence fd = %d imported for sync_obj = %d[%s] num fences [requested: %u processed: %u]",
            fence_cfg->dma_fence_fd, fence_cfg->sync_obj, fence_cfg->name,
            fence_input_info->num_fences_requested,
            fence_input_info->num_fences_processed);
    }

out_copy:
    if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
        fence_input_info, fence_cmd_args->input_data_size)) {
        rc = -EFAULT;
        CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
            fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
    }

    cam_generic_fence_free_input_info_util(&fence_input_info);
    return rc;
}
static int cam_generic_fence_handle_dma_signal(
    struct cam_generic_fence_cmd_args *fence_cmd_args)
{
    struct cam_dma_fence_signal signal_dma_fence;

    if (fence_cmd_args->input_data_size != sizeof(struct cam_dma_fence_signal)) {
        CAM_ERR(CAM_DMA_FENCE, "Size is invalid expected: 0x%llx actual: 0x%llx",
            sizeof(struct cam_dma_fence_signal),
            fence_cmd_args->input_data_size);
        return -EINVAL;
    }

    if (copy_from_user(&signal_dma_fence, (void __user *)fence_cmd_args->input_handle,
        fence_cmd_args->input_data_size))
        return -EFAULT;

    return cam_dma_fence_signal_fd(&signal_dma_fence);
}
static int cam_generic_fence_process_dma_fence_cmd(
    uint32_t id,
    struct cam_generic_fence_cmd_args *fence_cmd_args)
{
    int rc = -EINVAL;

    switch (id) {
    case CAM_GENERIC_FENCE_CREATE:
        rc = cam_generic_fence_handle_dma_create(fence_cmd_args);
        break;
    case CAM_GENERIC_FENCE_RELEASE:
        rc = cam_generic_fence_handle_dma_release(fence_cmd_args);
        break;
    case CAM_GENERIC_FENCE_IMPORT:
        rc = cam_generic_fence_handle_dma_import(fence_cmd_args);
        break;
    case CAM_GENERIC_FENCE_SIGNAL:
        rc = cam_generic_fence_handle_dma_signal(fence_cmd_args);
        break;
    default:
        CAM_ERR(CAM_DMA_FENCE, "IOCTL cmd: %u not supported for dma fence", id);
        break;
    }

    return rc;
}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
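
/*
 * Copy and validate the two-level signal input from userspace: first the
 * struct cam_generic_fence_signal_info header, then the per-fence signal
 * payload it points to. The payload element size is derived from the fence
 * type, and fence_data_size must equal element size * num_fences_requested.
 * On success both kernel copies are handed back to the caller, who owns
 * freeing them via cam_generic_fence_free_signal_input_info_util().
 */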
static int cam_generic_fence_validate_signal_input_info_util(
	int32_t fence_type,
	struct cam_generic_fence_cmd_args *fence_cmd_args,
	struct cam_generic_fence_signal_info **fence_signal_info,
	void **fence_signal_data)
{
	int rc = 0;
	struct cam_generic_fence_signal_info *signal_info = NULL;
	void *signal_data;
	uint32_t num_fences;
	size_t expected_size;

	*fence_signal_info = NULL;
	*fence_signal_data = NULL;

	if (fence_cmd_args->input_data_size !=
		sizeof(struct cam_generic_fence_signal_info)) {
		CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx",
			sizeof(struct cam_generic_fence_signal_info),
			fence_cmd_args->input_data_size);
		return -EINVAL;
	}

	signal_info = memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_cmd_args->input_data_size);
	if (IS_ERR_OR_NULL(signal_info)) {
		CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		return -ENOMEM;
	}

	/* Validate num fences */
	num_fences = signal_info->num_fences_requested;
	if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) {
		CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching",
			num_fences);
		rc = -EINVAL;
		goto free_mem;
	}

	if (signal_info->fence_handle_type != CAM_HANDLE_USER_POINTER) {
		CAM_ERR(CAM_SYNC, "Invalid signal handle type: %d",
			signal_info->fence_handle_type);
		rc = -EINVAL;
		goto free_mem;
	}

	/* Validate sizes */
	switch (fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		expected_size = sizeof(struct cam_sync_signal);
		break;
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		expected_size = sizeof(struct cam_synx_obj_signal);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		expected_size = sizeof(struct cam_dma_fence_signal);
		break;
	default:
		CAM_ERR(CAM_SYNC, "Unsupported fence type: %u", fence_type);
		rc = -EINVAL;
		goto free_mem;
	}

	if ((signal_info->fence_data_size) != (expected_size * num_fences)) {
		CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u",
			(expected_size * num_fences), signal_info->fence_data_size, num_fences);
		rc = -EINVAL;
		goto free_mem;
	}

	signal_data = memdup_user(u64_to_user_ptr(signal_info->fence_info_hdl),
		signal_info->fence_data_size);
	if (IS_ERR_OR_NULL(signal_data)) {
		CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
			signal_info->fence_info_hdl, signal_info->fence_data_size);
		rc = -ENOMEM;
		goto free_mem;
	}

	*fence_signal_info = signal_info;
	*fence_signal_data = signal_data;
	return rc;

free_mem:
	kfree(signal_info);
	return rc;
}

static void cam_generic_fence_free_signal_input_info_util(
	struct cam_generic_fence_signal_info **fence_signal_info,
	void **fence_signal_data)
{
	void *signal_data = *fence_signal_data;
	struct cam_generic_fence_signal_info *fence_input = *fence_signal_info;

	kfree(signal_data);
	kfree(fence_input);
	*fence_signal_info = NULL;
	*fence_signal_data = NULL;
}
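
/*
 * Look up one optional parameter from the fence config. The requested mask
 * selects a bit, ffs() maps it to an index into fence_cfg->params[], and the
 * value at that index is returned through *result. For example, a requested
 * mask of 0x4 maps to index 2, i.e. fence_cfg->params[2]. Missing or unset
 * params are not an error; *result simply stays 0.
 */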
static int cam_generic_fence_config_parse_params(
	struct cam_generic_fence_config *fence_cfg,
	int32_t requested_param_mask, int32_t *result)
{
	uint32_t index = 0, num_entries;

	if (!result) {
		CAM_ERR(CAM_SYNC, "Invalid result hdl: %p", result);
		return -EINVAL;
	}

	/* Assign to 0 by default */
	*result = 0;

	if (!fence_cfg->num_valid_params || !requested_param_mask) {
		CAM_DBG(CAM_SYNC,
			"No params configured num_valid = %d requested_mask = 0x%x",
			fence_cfg->num_valid_params, requested_param_mask);
		return 0;
	}

	if (!(fence_cfg->valid_param_mask & requested_param_mask)) {
		CAM_DBG(CAM_SYNC,
			"Requested parameter not set in additional param mask expecting: 0x%x actual: 0x%x",
			requested_param_mask, fence_cfg->valid_param_mask);
		return 0;
	}

	index = ffs(requested_param_mask) - 1;
	num_entries = ARRAY_SIZE(fence_cfg->params);
	if (index >= num_entries) {
		CAM_DBG(CAM_SYNC,
			"Obtained index %u from mask: 0x%x num_param_entries: %u, index exceeding max",
			index, requested_param_mask, num_entries);
		return 0;
	}

	*result = fence_cfg->params[index];
	return 0;
}
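
/*
 * Batched synx object creation. Each entry carries its own result in
 * reason_code; on the first hard failure the loop stops and whatever was
 * processed so far is copied back to userspace so the client can tell
 * which fences were actually created.
 */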
static int cam_generic_fence_handle_synx_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i;
	int32_t row_idx, fence_flag;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;
		fence_flag = 0;

		cam_generic_fence_config_parse_params(fence_cfg,
			CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag);
		rc = cam_synx_obj_create(fence_cfg->name,
			fence_flag, &fence_cfg->synx_obj, &row_idx);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Failed to create synx fence at index: %d rc: %d num fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		CAM_DBG(CAM_SYNX,
			"Created synx fence @ i: %d synx_obj: %d[%s] num fences [requested: %u processed: %u]",
			i, fence_cfg->synx_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_synx_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i;
	bool failed = false;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	struct cam_synx_obj_release_params synx_release_params;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		synx_release_params.use_row_idx = false;
		synx_release_params.u.synx_obj = fence_cfg->synx_obj;
		rc = cam_synx_obj_release(&synx_release_params);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Failed to release synx object at index: %d rc: %d num fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			/* Continue to release other fences, but mark the call as failed */
			failed = true;
			continue;
		}

		CAM_DBG(CAM_SYNX,
			"Released synx object @ i: %d handle: %d num fences [requested: %u processed: %u]",
			i, fence_cfg->synx_obj,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
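
/*
 * Associate an imported synx object with an existing sync object. If the
 * sync object is still active, the synx info is recorded on its table row;
 * if it has already been signaled, the synx object is signaled immediately
 * with the sync object's final state instead, and the caller is told via
 * *is_sync_obj_signaled so it can skip callback registration.
 */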
static int cam_sync_synx_associate_obj(int32_t sync_obj, uint32_t synx_obj,
	int32_t synx_obj_row_idx, bool *is_sync_obj_signaled)
{
	int rc;
	struct sync_table_row *row = NULL;
	struct cam_synx_obj_signal signal_synx_obj;

	rc = cam_sync_check_valid(sync_obj);
	if (rc)
		return rc;

	row = sync_dev->sync_table + sync_obj;
	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		signal_synx_obj.status = row->state;
		signal_synx_obj.synx_obj = synx_obj;
		*is_sync_obj_signaled = true;
		goto signal_synx;
	} else {
		row->synx_obj_info.synx_obj_row_idx = synx_obj_row_idx;
		row->synx_obj_info.sync_created_with_synx = false;
		row->synx_obj_info.synx_obj = synx_obj;
		set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask);
		CAM_DBG(CAM_SYNX, "sync_obj: %s[%d] associated with synx_obj: %d",
			row->name, sync_obj, row->synx_obj_info.synx_obj);
	}
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);

	return rc;

signal_synx:
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	return cam_synx_obj_signal_obj(&signal_synx_obj);
}
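
/*
 * Batched synx import. For each entry the synx handle is first resolved to
 * a row in the synx table, then either associated with an existing sync
 * object (if a valid sync_obj was supplied) or bound to a newly created one.
 * A callback is registered only when the sync object has not already been
 * signaled, and a sync object created here is torn down again if that
 * registration fails.
 */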
static int cam_generic_fence_handle_synx_import(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int32_t rc, i, synx_obj_row_idx;
	struct sync_synx_obj_info synx_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	bool is_sync_obj_signaled = false;
	bool is_sync_obj_created = false;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;
		is_sync_obj_signaled = false;
		is_sync_obj_created = false;

		/* Check if synx handle is for a valid synx obj */
		rc = cam_synx_obj_find_obj_in_table(fence_cfg->synx_obj,
			&synx_obj_row_idx);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Invalid synx obj for handle: %d", fence_cfg->synx_obj);
			fence_cfg->reason_code = -EINVAL;
			goto out_copy;
		}

		if ((fence_cfg->sync_obj > 0) && (fence_cfg->sync_obj < CAM_SYNC_MAX_OBJS)) {
			/* Associate synx object with existing sync object */
			rc = cam_sync_synx_associate_obj(fence_cfg->sync_obj,
				fence_cfg->synx_obj, synx_obj_row_idx,
				&is_sync_obj_signaled);
		} else {
			/* Create new sync object and associate synx object */
			synx_sync_create.sync_created_with_synx = false;
			synx_sync_create.synx_obj = fence_cfg->synx_obj;
			synx_sync_create.synx_obj_row_idx = synx_obj_row_idx;
			rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
				NULL, &synx_sync_create);
			is_sync_obj_created = true;
		}

		if (rc) {
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		if (!is_sync_obj_signaled) {
			/* Register a cb for synx_obj */
			rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj,
				synx_obj_row_idx, cam_sync_synx_obj_cb);
			if (rc) {
				CAM_ERR(CAM_SYNX,
					"Failed to register cb for synx_obj: %d sync_obj: %d rc: %d",
					fence_cfg->synx_obj, fence_cfg->sync_obj, rc);
				if (is_sync_obj_created)
					cam_sync_deinit_object(sync_dev->sync_table,
						fence_cfg->sync_obj, NULL, NULL);
				fence_cfg->reason_code = rc;
				goto out_copy;
			}
		}

		CAM_DBG(CAM_SYNX,
			"synx_obj handle = %d imported for dma fence fd: %d sync_obj = %d[%s] num fences [requested: %u processed: %u]",
			fence_cfg->synx_obj, fence_cfg->dma_fence_fd,
			fence_cfg->sync_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_handle_synx_signal(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int32_t rc, i;
	struct cam_generic_fence_signal_info *fence_signal_info;
	struct cam_synx_obj_signal *synx_signal_info;

	rc = cam_generic_fence_validate_signal_input_info_util(
		CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, fence_cmd_args,
		&fence_signal_info, (void **)&synx_signal_info);
	if (rc || !fence_signal_info || !synx_signal_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input signal info validation failed rc: %d fence_signal_info: %pK synx_signal_info: %pK",
			rc, fence_signal_info, synx_signal_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_signal_info->num_fences_requested; i++) {
		fence_signal_info->num_fences_processed++;

		rc = cam_synx_obj_signal_obj(&synx_signal_info[i]);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Failed to signal for synx_obj: %d, rc: %d, status: %d",
				synx_signal_info[i].synx_obj, rc,
				synx_signal_info[i].status);
		}

		synx_signal_info[i].reason_code = rc;
	}

	if (copy_to_user(u64_to_user_ptr(fence_signal_info->fence_info_hdl), synx_signal_info,
		fence_signal_info->fence_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNX, "copy to user for signal data failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle,
			(sizeof(struct cam_synx_obj_signal) *
			fence_signal_info->num_fences_requested));
		goto end;
	}

	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_signal_info, sizeof(struct cam_generic_fence_signal_info))) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle,
			sizeof(struct cam_generic_fence_signal_info));
	}

end:
	cam_generic_fence_free_signal_input_info_util(&fence_signal_info,
		(void **)&synx_signal_info);
	return rc;
}

static int cam_generic_fence_process_synx_obj_cmd(
	uint32_t id,
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = -EINVAL;

	switch (id) {
	case CAM_GENERIC_FENCE_CREATE:
		rc = cam_generic_fence_handle_synx_create(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_RELEASE:
		rc = cam_generic_fence_handle_synx_release(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_IMPORT:
		rc = cam_generic_fence_handle_synx_import(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_SIGNAL:
		rc = cam_generic_fence_handle_synx_signal(fence_cmd_args);
		break;
	default:
		CAM_ERR(CAM_SYNX, "IOCTL cmd: %u not supported for synx object", id);
		break;
	}

	return rc;
}
#endif
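
/*
 * Batched sync object creation. Depending on fence_sel_mask, each entry may
 * additionally create a dma fence and/or a synx object (when synx support is
 * compiled in) and bind them to the new sync object. Every failure path
 * unwinds whatever was created for that entry (dma fence, synx object, sync
 * object) before results are copied back to userspace.
 */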
static int cam_generic_fence_handle_sync_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i, dma_fence_row_idx;
	bool dma_fence_created;
	unsigned long fence_sel_mask;
	struct cam_dma_fence_release_params release_params;
	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	bool synx_obj_created;
	struct sync_synx_obj_info synx_obj_create;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	int32_t fence_flag;
	int32_t synx_obj_row_idx;
	struct cam_synx_obj_release_params synx_release_params;
	struct dma_fence *dma_fence_ptr;
#endif

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		/* Reset flags */
		dma_fence_created = false;
		synx_obj_created = false;

		fence_sel_mask = fence_cfg->fence_sel_mask;
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
				&dma_fence_row_idx, fence_cfg->name);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to create dma fence at index: %d rc: %d num_fences: %u",
					i, rc, fence_input_info->num_fences_requested);
				fence_cfg->reason_code = rc;
				goto out_copy;
			}

			dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
			dma_sync_create.fd = fence_cfg->dma_fence_fd;
			dma_sync_create.sync_created_with_dma = true;
			dma_fence_created = true;
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Create a synx object */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			if (dma_fence_created) {
				dma_fence_ptr = cam_dma_fence_get_fence_from_fd(
					dma_sync_create.fd, &dma_fence_row_idx);
				rc = cam_synx_obj_import_dma_fence(fence_cfg->name,
					fence_cfg->params[0], dma_fence_ptr,
					&fence_cfg->synx_obj, &synx_obj_row_idx);
			} else {
				cam_generic_fence_config_parse_params(fence_cfg,
					CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag);
				rc = cam_synx_obj_create(fence_cfg->name,
					fence_flag, &fence_cfg->synx_obj,
					&synx_obj_row_idx);
			}

			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to create/import synx obj at index: %d rc: %d num_fences: %u",
					i, rc, fence_input_info->num_fences_requested);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;
					cam_dma_fence_release(&release_params);
				}

				/* Release synx obj */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;
					cam_synx_obj_release(&synx_release_params);
				}

				goto out_copy;
			}

			synx_obj_create.sync_created_with_synx = true;
			synx_obj_create.synx_obj = fence_cfg->synx_obj;
			synx_obj_create.synx_obj_row_idx = synx_obj_row_idx;
			synx_obj_created = true;
		}
#endif

		rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
			(dma_fence_created ? &dma_sync_create : NULL),
			(synx_obj_created ? &synx_obj_create : NULL));
		if (rc) {
			fence_cfg->reason_code = rc;
			CAM_ERR(CAM_SYNC,
				"Failed to create sync obj at index: %d rc: %d num_fences: %u",
				i, rc, fence_input_info->num_fences_requested);

			/* Release dma fence */
			if (dma_fence_created) {
				release_params.use_row_idx = true;
				release_params.u.dma_row_idx = dma_fence_row_idx;
				cam_dma_fence_release(&release_params);
			}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
			/* Release synx obj */
			if (synx_obj_created) {
				synx_release_params.use_row_idx = true;
				synx_release_params.u.synx_row_idx = synx_obj_row_idx;
				cam_synx_obj_release(&synx_release_params);
			}
#endif
			goto out_copy;
		}

		/* Register dma fence cb */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
				&dma_fence_row_idx, cam_sync_dma_fence_cb);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);
				fence_cfg->reason_code = rc;

				/* Destroy sync obj */
				cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
					NULL, NULL);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;
					cam_dma_fence_release(&release_params);
				}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
				/* Release synx obj */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;
					cam_synx_obj_release(&synx_release_params);
				}
#endif
				goto out_copy;
			}
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Register synx object callback */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj,
				synx_obj_row_idx, cam_sync_synx_obj_cb);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to register cb for synx_obj: %d sync_obj: %d rc: %d",
					fence_cfg->synx_obj, fence_cfg->sync_obj, rc);
				fence_cfg->reason_code = rc;

				/* Destroy sync obj */
				cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
					NULL, NULL);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;
					cam_dma_fence_release(&release_params);
				}

				/* Release synx obj */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;
					cam_synx_obj_release(&synx_release_params);
				}

				goto out_copy;
			}
		}
#endif

		CAM_DBG(CAM_SYNC,
			"Created sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
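
/*
 * Batched sync object release. Associated dma fences and synx objects are
 * released only if they were created together with the sync object, as
 * reported back through the check_for_dma_release/check_for_synx_release
 * payloads of cam_sync_deinit_object(); otherwise the entry is marked with
 * -EPERM. Unlike create, release keeps going past per-entry failures and
 * reports an overall -ENOMSG.
 */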
static int cam_generic_fence_handle_sync_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	bool failed = false;
	int rc, i;
	unsigned long fence_sel_mask;
	struct cam_sync_check_for_dma_release check_for_dma_release;
	struct cam_dma_fence_release_params release_params;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	struct cam_sync_check_for_synx_release check_for_synx_release;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	struct cam_synx_obj_release_params synx_release_params;
#endif

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;

		/* Reset fields */
		fence_cfg->reason_code = 0;
		check_for_dma_release.sync_created_with_dma = false;
		check_for_dma_release.dma_fence_fd = fence_cfg->dma_fence_fd;
		check_for_synx_release.sync_created_with_synx = false;
		check_for_synx_release.synx_obj = fence_cfg->synx_obj;

		rc = cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
			&check_for_dma_release, &check_for_synx_release);
		if (rc) {
			fence_cfg->reason_code = rc;
			failed = true;
			CAM_ERR(CAM_SYNC,
				"Failed to release sync obj at index: %d rc: %d num_fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
		}

		fence_sel_mask = fence_cfg->fence_sel_mask;
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			if (!check_for_dma_release.sync_created_with_dma) {
				CAM_ERR(CAM_SYNC,
					"Failed to release dma fence fd: %d with sync_obj: %d, not created together",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj);
				failed = true;
				fence_cfg->reason_code = -EPERM;
				continue;
			}

			release_params.use_row_idx = true;
			release_params.u.dma_row_idx = check_for_dma_release.dma_fence_row_idx;
			rc = cam_dma_fence_release(&release_params);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to destroy dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
					i, rc, fence_input_info->num_fences_requested,
					fence_input_info->num_fences_processed);
				fence_cfg->reason_code = rc;
				failed = true;
				continue;
			}
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Release associated synx obj */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			if (!check_for_synx_release.sync_created_with_synx) {
				CAM_ERR(CAM_SYNC,
					"Failed to release synx_obj: %d with sync_obj: %d, not created together",
					fence_cfg->synx_obj, fence_cfg->sync_obj);
				failed = true;
				fence_cfg->reason_code = -EPERM;
				continue;
			}

			synx_release_params.use_row_idx = true;
			synx_release_params.u.synx_row_idx =
				check_for_synx_release.synx_obj_row_idx;
			rc = cam_synx_obj_release(&synx_release_params);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to destroy synx_obj at index: %d rc: %d num fences [requested: %u processed: %u]",
					i, rc, fence_input_info->num_fences_requested,
					fence_input_info->num_fences_processed);
				fence_cfg->reason_code = rc;
				failed = true;
				continue;
			}
		}
#endif

		CAM_DBG(CAM_SYNC,
			"Released sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d synx_obj: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd, fence_cfg->synx_obj,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}

static int cam_generic_fence_process_sync_obj_cmd(
	uint32_t id,
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc = -EINVAL;

	switch (id) {
	case CAM_GENERIC_FENCE_CREATE:
		rc = cam_generic_fence_handle_sync_create(fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_RELEASE:
		rc = cam_generic_fence_handle_sync_release(fence_cmd_args);
		break;
	default:
		CAM_ERR(CAM_SYNC, "IOCTL cmd: %u not supported for sync object", id);
		break;
	}

	return rc;
}
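
/*
 * Entry point for the four generic fence IOCTLs. Validates the ioctl wrapper
 * and the embedded cam_generic_fence_cmd_args, then fans out by fence type.
 * A minimal sketch of the expected userspace setup (hypothetical values,
 * assuming a batch described by struct cam_generic_fence_input_info):
 *
 *	struct cam_generic_fence_cmd_args cmd_args = {
 *		.fence_type        = CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
 *		.input_handle_type = CAM_HANDLE_USER_POINTER,
 *		.input_handle      = (uint64_t)&fence_input_info,
 *		.input_data_size   = sizeof(fence_input_info),
 *	};
 *	k_ioctl.id        = CAM_GENERIC_FENCE_CREATE;
 *	k_ioctl.size      = sizeof(cmd_args);
 *	k_ioctl.ioctl_ptr = (uint64_t)&cmd_args;
 */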
static int cam_generic_fence_parser(
	struct cam_private_ioctl_arg *k_ioctl)
{
	int rc;
	struct cam_generic_fence_cmd_args fence_cmd_args;

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid args input ptr: %p",
			k_ioctl->ioctl_ptr);
		return -EINVAL;
	}

	if (k_ioctl->size != sizeof(struct cam_generic_fence_cmd_args)) {
		CAM_ERR(CAM_SYNC, "Size mismatch expected: 0x%llx actual: 0x%llx",
			sizeof(struct cam_generic_fence_cmd_args), k_ioctl->size);
		return -EINVAL;
	}

	if (copy_from_user(&fence_cmd_args, u64_to_user_ptr(k_ioctl->ioctl_ptr),
		sizeof(fence_cmd_args))) {
		CAM_ERR(CAM_SYNC, "copy from user failed for input ptr: %pK",
			k_ioctl->ioctl_ptr);
		return -EFAULT;
	}

	if (fence_cmd_args.input_handle_type != CAM_HANDLE_USER_POINTER) {
		CAM_ERR(CAM_SYNC, "Invalid handle type: %u",
			fence_cmd_args.input_handle_type);
		return -EINVAL;
	}

	switch (fence_cmd_args.fence_type) {
	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
		rc = cam_generic_fence_process_sync_obj_cmd(k_ioctl->id, &fence_cmd_args);
		break;
	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
		rc = cam_generic_fence_process_dma_fence_cmd(k_ioctl->id, &fence_cmd_args);
		break;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
		rc = cam_generic_fence_process_synx_obj_cmd(k_ioctl->id, &fence_cmd_args);
		break;
#endif
	default:
		rc = -EINVAL;
		CAM_ERR(CAM_SYNC, "fence type: 0x%x handling not supported",
			fence_cmd_args.fence_type);
		break;
	}

	return rc;
}
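
/*
 * v4l2 private ioctl handler. The wrapper struct arrives already copied
 * into kernel space by the v4l2 core, so only the user pointers embedded
 * in it still need copy_from_user/copy_to_user handling in the individual
 * command handlers.
 */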
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result = k_ioctl.result;
		break;
	case CAM_GENERIC_FENCE_CREATE:
	case CAM_GENERIC_FENCE_RELEASE:
	case CAM_GENERIC_FENCE_IMPORT:
	case CAM_GENERIC_FENCE_SIGNAL:
		rc = cam_generic_fence_parser(&k_ioctl);
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}

static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
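
/*
 * Single-open device: a second open returns -EALREADY. Opening also brings
 * up the dma fence (and, if enabled, synx) tables and optionally allocates
 * the fence monitor table when the corresponding debugfs mask bit is set.
 */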
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		cam_synx_obj_open();
#endif
		cam_dma_fence_open();
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed: %d", rc);
	}

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
		sync_dev->mon_data = kzalloc(
			sizeof(struct cam_generic_fence_monitor_data *) *
			CAM_SYNC_MONITOR_TABLE_SIZE, GFP_KERNEL);
		if (!sync_dev->mon_data) {
			CAM_WARN(CAM_SYNC, "Failed to allocate memory %zu",
				sizeof(struct cam_generic_fence_monitor_data *) *
				CAM_SYNC_MONITOR_TABLE_SIZE);
		}
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}
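
/*
 * Last-close teardown. Active sync objects are first signaled with
 * CAM_SYNC_STATE_SIGNALED_ERROR so waiters are not left hanging, the work
 * queue is flushed to drain in-flight callbacks, and only then are the
 * table rows destroyed along with the dma fence/synx tables and monitor
 * data.
 */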
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}

	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from
			 * logging it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d", i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks
		 * to finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callback worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n", i);
			}
		}

		if (sync_dev->mon_data) {
			for (i = 0; i < CAM_SYNC_MONITOR_TABLE_SIZE; i++)
				kfree(sync_dev->mon_data[i]);
		}
		kfree(sync_dev->mon_data);
	}

	/* Clean dma fence table */
	cam_dma_fence_close();
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	/* Clean synx obj table */
	cam_synx_obj_close();
#endif
	mutex_unlock(&sync_dev->table_lock);

	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
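
/*
 * v4l2 event merge callback, invoked when the event queue overflows and an
 * old event is about to be dropped. Nothing can actually be merged here, so
 * the dropped notification is simply logged in the format matching the
 * subscribed event version.
 */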
static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
	struct v4l2_event *new)
{
	if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
		struct cam_sync_ev_header_v2 *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d reason %u %u %u %u",
			old->id, ev_header->sync_obj, ev_header->status,
			ev_header->evt_param[0], ev_header->evt_param[1],
			ev_header->evt_param[2], ev_header->evt_param[3]);
	} else {
		struct cam_sync_ev_header *ev_header;

		ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
		CAM_ERR(CAM_CRM,
			"Failed to notify event id %d fence %d status %d",
			old->id, ev_header->sync_obj, ev_header->status);
	}
}

static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};

int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
		return -EINVAL;
	}

	sync_dev->version = sub->type;
	CAM_DBG(CAM_SYNC, "Sync event version type 0x%x", sync_dev->version);
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
		&cam_sync_v4l2_ops);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
		(sub->type == CAM_SYNC_V4L_EVENT_V2))) {
		CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
		return -EINVAL;
	}

	return v4l2_event_unsubscribe(fh, sub);
}

static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

#if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif

static int cam_sync_create_debugfs(void)
{
	int rc;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("sync", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_SYNC, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}

	/* Store parent inode for cleanup in caller */
	sync_dev->dentry = dbgfileptr;
	debugfs_create_bool("trigger_cb_without_switch", 0644,
		sync_dev->dentry, &trigger_cb_without_switch);
	debugfs_create_ulong("cam_sync_monitor_mask", 0644,
		sync_dev->dentry, &cam_sync_monitor_mask);
end:
	return rc;
}

#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
{
	int rc;
	uint32_t sync_status = synx_status;

	switch (synx_status) {
	case SYNX_STATE_ACTIVE:
		sync_status = CAM_SYNC_STATE_ACTIVE;
		break;
	case SYNX_STATE_SIGNALED_SUCCESS:
		sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		break;
	case SYNX_STATE_SIGNALED_ERROR:
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
		sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
		break;
	default:
		CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
			synx_status, sync_obj);
		sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
		break;
	}

	rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
			rc, sync_obj, synx_status, sync_status);
	}

	return rc;
}

int cam_synx_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	return cam_sync_register_callback(cb_func, userdata, sync_obj);
}

int cam_synx_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	return cam_sync_deregister_callback(cb_func, userdata, sync_obj);
}

static int cam_sync_register_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc;

	rc = synx_register_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);

	return rc;
}

static void cam_sync_unregister_synx_bind_ops(
	struct synx_register_params *object)
{
	int rc;

	rc = synx_deregister_ops(object);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx deregistration fail with %d", rc);
}

static void cam_sync_configure_synx_obj(struct synx_register_params *object)
{
	struct synx_register_params *params = object;

	params->name = CAM_SYNC_NAME;
	params->type = SYNX_TYPE_CSL;
	params->ops.register_callback = cam_synx_sync_register_callback;
	params->ops.deregister_callback = cam_synx_sync_deregister_callback;
	params->ops.enable_signaling = cam_sync_get_obj_ref;
	params->ops.signal = cam_synx_sync_signal;
}
#endif
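
/*
 * Component bind: allocates the sync device, registers the v4l2/media video
 * device, seeds the handle bitmap (bit 0 stays set since handle 0 is
 * reserved as invalid), creates the high-priority callback workqueue, and
 * brings up the dma fence driver plus whichever synx integration is
 * configured. Error paths unwind in strict reverse order of setup.
 */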
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc;
	int idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;
	rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	/* Initialize dma fence driver */
	rc = cam_dma_fence_driver_init();
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"DMA fence driver initialization failed rc: %d", rc);
		goto workq_destroy;
	}

	trigger_cb_without_switch = false;
	cam_sync_monitor_mask = 0;
	cam_sync_create_debugfs();
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	/* Initialize synx obj driver */
	rc = cam_synx_obj_driver_init();
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Synx obj driver initialization failed rc: %d", rc);
		goto dma_driver_deinit;
	}
#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	if (rc)
		goto dma_driver_deinit;
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
dma_driver_deinit:
	cam_dma_fence_driver_deinit();
#endif
workq_destroy:
	destroy_workqueue(sync_dev->work_queue);
v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}

static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	cam_synx_obj_driver_deinit();
#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	sync_dev->dentry = NULL;
	cam_dma_fence_driver_deinit();

	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	kfree(sync_dev);
	sync_dev = NULL;
}

static const struct component_ops cam_sync_component_ops = {
	.bind = cam_sync_component_bind,
	.unbind = cam_sync_component_unbind,
};

static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;

	CAM_DBG(CAM_SYNC, "Adding Sync component");
	rc = component_add(&pdev->dev, &cam_sync_component_ops);
	if (rc)
		CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);

	return rc;
}

static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}

static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};

MODULE_DEVICE_TABLE(of, cam_sync_dt_match);

struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};

int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");