cam_sync.c 82 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/module.h>
  8. #include <linux/irqflags.h>
  9. #include <linux/module.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/debugfs.h>
  12. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
  13. #include <synx_api.h>
  14. #endif
  15. #include "cam_sync_util.h"
  16. #include "cam_debug_util.h"
  17. #include "cam_common_util.h"
  18. #include "cam_compat.h"
  19. #include "camera_main.h"
  20. #include "cam_req_mgr_workq.h"
/* Global sync device state: fence table, bitmaps, locks, workqueue */
struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;

/* Bitmask of fence types (sync/dma/synx) with monitoring enabled */
unsigned long cam_sync_monitor_mask;
  29. static void cam_sync_print_fence_table(void)
  30. {
  31. int idx;
  32. for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) {
  33. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  34. CAM_INFO(CAM_SYNC,
  35. "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d",
  36. idx,
  37. sync_dev->sync_table[idx].sync_id,
  38. sync_dev->sync_table[idx].name,
  39. sync_dev->sync_table[idx].type,
  40. sync_dev->sync_table[idx].state,
  41. atomic_read(&sync_dev->sync_table[idx].ref_cnt));
  42. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  43. }
  44. }
/*
 * Allocate a free row in the sync table and initialize it as an
 * individual (non-group) sync object, optionally associating it with a
 * dma fence and/or a synx object.
 *
 * @sync_obj: out param; receives the new sync object id (table index)
 * @name: debug name copied into the row
 * @dma_sync_create_info: optional dma-fence association info, may be NULL
 * @synx_obj_sync_create_info: optional synx association info, may be NULL
 *
 * Returns 0 on success, -ENOMEM when the table is full, -EINVAL on row
 * init failure, or the dma-fence getref error code.
 */
static int cam_sync_create_util(
	int32_t *sync_obj, const char *name,
	struct cam_dma_fence_create_sync_obj_payload *dma_sync_create_info,
	struct sync_synx_obj_info *synx_obj_sync_create_info)
{
	int rc;
	long idx;
	bool bit;
	struct sync_table_row *row = NULL;

	/*
	 * find_first_zero_bit and test_and_set_bit are separate steps, so a
	 * concurrent creator can grab the same index in between; retry until
	 * we atomically win a slot.
	 */
	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS) {
			CAM_ERR(CAM_SYNC,
				"Error: Unable to create sync idx = %d sync name = %s reached max!",
				idx, name);
			cam_sync_print_fence_table();
			return -ENOMEM;
		}
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	*sync_obj = idx;

	/* Associate sync obj with synx if any holding sync lock */
	if (synx_obj_sync_create_info) {
		row = sync_dev->sync_table + idx;
		row->synx_obj_info.synx_obj_row_idx =
			synx_obj_sync_create_info->synx_obj_row_idx;
		row->synx_obj_info.sync_created_with_synx =
			synx_obj_sync_create_info->sync_created_with_synx;
		row->synx_obj_info.synx_obj = synx_obj_sync_create_info->synx_obj;
		set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask);
		CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with synx_obj: %d",
			name, *sync_obj, row->synx_obj_info.synx_obj);
	}

	/* Associate sync obj with dma fence if any holding sync lock */
	if (dma_sync_create_info) {
		row = sync_dev->sync_table + idx;
		row->dma_fence_info.dma_fence_fd = dma_sync_create_info->fd;
		row->dma_fence_info.dma_fence_row_idx = dma_sync_create_info->dma_fence_row_idx;
		row->dma_fence_info.sync_created_with_dma =
			dma_sync_create_info->sync_created_with_dma;
		set_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask);

		/* Association refcnt for non-import cases */
		if (dma_sync_create_info->sync_created_with_dma) {
			rc = cam_dma_fence_get_put_ref(true, row->dma_fence_info.dma_fence_row_idx);
			if (rc)
				CAM_ERR(CAM_SYNC,
					"Failed to getref on dma fence idx: %u fd: %d sync_obj: %d rc: %d",
					row->dma_fence_info.dma_fence_row_idx,
					row->dma_fence_info.dma_fence_fd,
					*sync_obj, rc);
			/* getref failure is logged, not unwound; rc reaches caller */
			goto end;
		}
		CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with dma fence fd: %d",
			name, *sync_obj, dma_sync_create_info->fd);
		goto end;
	}

	CAM_DBG(CAM_SYNC, "sync_obj: %s[%i]", name, *sync_obj);
end:
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
	return rc;
}
/*
 * Create a plain individual sync object with no dma-fence or synx
 * association. See cam_sync_create_util() for return codes.
 */
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	return cam_sync_create_util(sync_obj, name, NULL, NULL);
}
/*
 * Register @cb_func (with @userdata) to run when @sync_obj signals.
 *
 * If the object is already signaled and has no children pending
 * (!row->remaining), the callback is dispatched right away: inline when
 * trigger_cb_without_switch is set, otherwise via the sync workqueue.
 * Otherwise it is parked on the row's callback_list for signal time.
 *
 * Returns 0 on success, -EINVAL for bad args or an uninitialized row,
 * -ENOMEM on allocation failure.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0, rc = 0;

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0) || (!cb_func))
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %s[%d]",
			row->name, sync_obj);
		rc = -EINVAL;
		goto monitor_dump;
	}

	/* GFP_ATOMIC: allocating while holding a bh-disabled spinlock */
	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		rc = -ENOMEM;
		goto monitor_dump;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
		(!row->remaining)) {
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_SKIP_REGISTER_CB);
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
				row->name,
				sync_obj);
			/* Snapshot state before dropping the lock; cb runs unlocked */
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]",
				row->name,
				sync_cb->sync_obj);
			sync_cb->workq_scheduled_ts = ktime_get();
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	/* Not yet signaled: park the callback on the row's list */
	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
		cam_generic_fence_update_monitor_array(sync_obj,
			&sync_dev->table_lock, sync_dev->mon_data,
			CAM_FENCE_OP_REGISTER_CB);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;

monitor_dump:
	cam_sync_dump_monitor_array(row);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
/*
 * Remove previously registered (@cb_func, @userdata) entries from
 * @sync_obj's pending callback list without invoking them.
 *
 * Returns 0 when at least one matching entry was removed, -ENOENT when
 * none matched, -EINVAL for a bad or uninitialized sync obj.
 */
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;
	int rc = 0;

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name, sync_obj);
		rc = -EINVAL;
		goto monitor_dump;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
		row->name, sync_obj);

	/* _safe variant: entries are freed while walking the list */
	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if ((sync_cb->callback_func == cb_func) &&
			(sync_cb->cb_data == userdata)) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
		if (found) {
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_UNREGISTER_CB);
		} else {
			CAM_ERR(CAM_SYNC,
				"Error: Callback not found sync obj = %s[%d] : sync_id %d, state %d",
				row->name, sync_obj, row->sync_id, row->state);
			cam_sync_dump_monitor_array(row);
		}
	}
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return found ? 0 : -ENOENT;

monitor_dump:
	cam_sync_dump_monitor_array(row);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
  241. static inline int cam_sync_signal_dma_fence_util(
  242. struct sync_table_row *row, uint32_t status)
  243. {
  244. struct cam_dma_fence_signal signal_dma_fence;
  245. signal_dma_fence.dma_fence_fd = row->dma_fence_info.dma_fence_fd;
  246. switch (status) {
  247. case CAM_SYNC_STATE_SIGNALED_SUCCESS:
  248. signal_dma_fence.status = 0;
  249. break;
  250. case CAM_SYNC_STATE_SIGNALED_ERROR:
  251. /* Advertise error */
  252. signal_dma_fence.status = -EADV;
  253. break;
  254. case CAM_SYNC_STATE_SIGNALED_CANCEL:
  255. signal_dma_fence.status = -ECANCELED;
  256. break;
  257. default:
  258. CAM_ERR(CAM_SYNC,
  259. "Signaling undefined status: %d for sync obj: %d",
  260. status, row->sync_id);
  261. return -EINVAL;
  262. }
  263. return cam_dma_fence_internal_signal(row->dma_fence_info.dma_fence_row_idx,
  264. &signal_dma_fence);
  265. }
/*
 * Propagate a child's signal to every parent (merged/GROUP) object on
 * @parents_list. Each parent's 'remaining' count is decremented under
 * that parent's row lock; when it reaches zero the parent's signaled
 * callbacks are dispatched with the aggregated parent state.
 *
 * @status: signaled state of the child triggering the propagation
 * @event_cause: event code forwarded to dispatched callbacks
 * @parents_list: caller-local list spliced off the child row;
 *                entries are freed as they are consumed
 */
static void cam_sync_signal_parent_util(int32_t status,
	uint32_t event_cause, struct list_head *parents_list)
{
	int rc;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info, temp_parent_info,
		parents_list, list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			/* Bad parent state: skip this entry but keep walking */
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		/* All children signaled: fire this parent's callbacks */
		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state,
				event_cause);

		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(parent_info->sync_id,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_SIGNAL);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}
}
  306. static int cam_sync_signal_validate_util(
  307. int32_t sync_obj, int32_t status)
  308. {
  309. struct sync_table_row *row = sync_dev->sync_table + sync_obj;
  310. if (row->state == CAM_SYNC_STATE_INVALID) {
  311. CAM_ERR(CAM_SYNC,
  312. "Error: accessing an uninitialized sync obj = %s[%d]",
  313. row->name, sync_obj);
  314. return -EINVAL;
  315. }
  316. if (row->type == CAM_SYNC_TYPE_GROUP) {
  317. CAM_ERR(CAM_SYNC,
  318. "Error: Signaling a GROUP sync object = %s[%d]",
  319. row->name, sync_obj);
  320. return -EINVAL;
  321. }
  322. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  323. CAM_ERR(CAM_SYNC,
  324. "Error: Sync object already signaled sync_obj = %s[%d]",
  325. row->name, sync_obj);
  326. return -EALREADY;
  327. }
  328. if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) &&
  329. (status != CAM_SYNC_STATE_SIGNALED_ERROR) &&
  330. (status != CAM_SYNC_STATE_SIGNALED_CANCEL)) {
  331. CAM_ERR(CAM_SYNC,
  332. "Error: signaling with undefined status = %d", status);
  333. return -EINVAL;
  334. }
  335. return 0;
  336. }
/*
 * Signal @sync_obj with terminal state @status, then fan the signal out:
 * first to any associated dma fence, then (if enabled) the associated
 * synx object, then this object's own registered callbacks, and finally
 * to all parent (merged) objects.
 *
 * The signal only proceeds when the row's ref_cnt drops to zero; earlier
 * callers simply release their reference and return 0.
 *
 * Returns 0 on success (including the deferred ref_cnt case), or the
 * validation error code.
 */
int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
{
	struct sync_table_row *row = NULL;
	struct list_head parents_list;
	int rc = 0;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	uint32_t synx_row_idx;
	struct cam_synx_obj_signal signal_synx_obj;
#endif

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	rc = cam_sync_signal_validate_util(sync_obj, status);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Error: Failed to validate signal info for sync_obj = %s[%d] with status = %d rc = %d",
			row->name, sync_obj, status, rc);
		goto monitor_dump;
	}

	/* Last reference actually performs the signal; earlier ones bail */
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;

	/*
	 * Signal associated dma fence first - external entities
	 * waiting on this fence can start processing
	 */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		rc = cam_sync_signal_dma_fence_util(row, status);
		if (rc) {
			CAM_ERR(CAM_SYNC,
				"Error: Failed to signal associated dma fencefd = %d for sync_obj = %s[%d]",
				row->dma_fence_info.dma_fence_fd, row->name, sync_obj);
			cam_sync_dump_monitor_array(row);
		}
	}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	/*
	 * Signal associated synx obj prior to sync
	 */
	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
		signal_synx_obj.status = status;
		signal_synx_obj.synx_obj = row->synx_obj_info.synx_obj;
		synx_row_idx = row->synx_obj_info.synx_obj_row_idx;
		/* Release & obtain the row lock after synx signal */
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		rc = cam_synx_obj_internal_signal(synx_row_idx, &signal_synx_obj);
		spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
		if (rc) {
			CAM_ERR(CAM_SYNC,
				"Error: Failed to signal associated synx obj = %d for sync_obj = %d",
				signal_synx_obj.synx_obj, sync_obj);
			cam_sync_dump_monitor_array(row);
		}
	}
#endif

	cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
		cam_generic_fence_update_monitor_array(sync_obj,
			&sync_dev->table_lock, sync_dev->mon_data,
			CAM_FENCE_OP_SIGNAL);

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/* Parents are walked outside the child's lock */
	cam_sync_signal_parent_util(status, event_cause, &parents_list);

	return 0;

monitor_dump:
	cam_sync_dump_monitor_array(row);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
  416. int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
  417. {
  418. int rc, i;
  419. long idx = 0;
  420. bool bit;
  421. if ((!sync_obj) || (!merged_obj)) {
  422. CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
  423. return -EINVAL;
  424. }
  425. if (num_objs <= 1) {
  426. CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
  427. return -EINVAL;
  428. }
  429. if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
  430. != num_objs) {
  431. CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
  432. return -EINVAL;
  433. }
  434. for (i = 0; i < num_objs; i++) {
  435. rc = cam_sync_check_valid(sync_obj[i]);
  436. if (rc) {
  437. CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail",
  438. i, sync_obj[i]);
  439. return rc;
  440. }
  441. }
  442. do {
  443. idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
  444. if (idx >= CAM_SYNC_MAX_OBJS)
  445. return -ENOMEM;
  446. bit = test_and_set_bit(idx, sync_dev->bitmap);
  447. } while (bit);
  448. spin_lock_bh(&sync_dev->row_spinlocks[idx]);
  449. rc = cam_sync_init_group_object(sync_dev->sync_table,
  450. idx, sync_obj, num_objs);
  451. if (rc < 0) {
  452. CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
  453. idx);
  454. clear_bit(idx, sync_dev->bitmap);
  455. return -EINVAL;
  456. }
  457. CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
  458. *merged_obj = idx;
  459. spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
  460. return 0;
  461. }
/*
 * Take a reference on an ACTIVE sync object so a later cam_sync_signal()
 * defers until the reference is released.
 *
 * NOTE(review): this path uses spin_lock()/spin_unlock() while the rest
 * of the file uses the _bh variants on the same locks — confirm this is
 * intentional for the calling context.
 *
 * Returns 0 on success, -EINVAL for a bad or non-ACTIVE object.
 */
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	int rc;

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name, sync_obj);
		rc = -EINVAL;
		goto monitor_dump;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;

monitor_dump:
	cam_sync_dump_monitor_array(row);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
/*
 * Drop a reference previously taken with cam_sync_get_obj_ref().
 * The row spinlock is not taken; only the atomic counter is touched.
 * Returns 0, or -EINVAL for an out-of-range sync obj.
 */
int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}
/*
 * Destroy @sync_obj, releasing its table row. Thin wrapper over
 * cam_sync_deinit_object() with no dma-fence/synx release params.
 */
int cam_sync_destroy(int32_t sync_obj)
{
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj, NULL, NULL);
}
  500. int cam_sync_check_valid(int32_t sync_obj)
  501. {
  502. struct sync_table_row *row = NULL;
  503. int rc;
  504. if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
  505. return -EINVAL;
  506. spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
  507. row = sync_dev->sync_table + sync_obj;
  508. if (!test_bit(sync_obj, sync_dev->bitmap)) {
  509. CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
  510. row->name, sync_obj);
  511. rc = -EINVAL;
  512. goto monitor_dump;
  513. }
  514. if (row->state == CAM_SYNC_STATE_INVALID) {
  515. CAM_ERR(CAM_SYNC,
  516. "Error: accessing an uninitialized sync obj = %s[%d]",
  517. row->name, sync_obj);
  518. rc = -EINVAL;
  519. goto monitor_dump;
  520. }
  521. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  522. return 0;
  523. monitor_dump:
  524. cam_sync_dump_monitor_array(row);
  525. spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
  526. return rc;
  527. }
/*
 * Block until @sync_obj signals or @timeout_ms elapses.
 *
 * Only CAM_SYNC_STATE_SIGNALED_SUCCESS counts as success; ERROR and
 * CANCEL (or any other state observed after wakeup) return -EINVAL,
 * and a timeout returns -ETIMEDOUT.
 *
 * Note: row->state is read here without taking the row spinlock; the
 * completion provides the wakeup ordering.
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc;
	struct sync_table_row *row = NULL;

	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name, sync_obj);
		rc = -EINVAL;
		goto monitor_dump;
	}

	timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));
	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
		rc = -ETIMEDOUT;
		goto monitor_dump;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
		case CAM_SYNC_STATE_SIGNALED_CANCEL:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d, name = %s",
				row->state, sync_obj, row->name);
			rc = -EINVAL;
			goto monitor_dump;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			goto monitor_dump;
		}
	}

	return rc;

monitor_dump:
	cam_sync_dump_monitor_array(row);
	return rc;
}
  574. static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
  575. {
  576. struct cam_sync_info sync_create;
  577. int result;
  578. if (k_ioctl->size != sizeof(struct cam_sync_info))
  579. return -EINVAL;
  580. if (!k_ioctl->ioctl_ptr)
  581. return -EINVAL;
  582. if (copy_from_user(&sync_create,
  583. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  584. k_ioctl->size))
  585. return -EFAULT;
  586. sync_create.name[SYNC_DEBUG_NAME_LEN] = '\0';
  587. result = cam_sync_create(&sync_create.sync_obj,
  588. sync_create.name);
  589. if (!result)
  590. if (copy_to_user(
  591. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  592. &sync_create, k_ioctl->size))
  593. return -EFAULT;
  594. return result;
  595. }
/*
 * Handle the CAM_SYNC signal private ioctl: copy the signal request from
 * user space and signal the sync object with the requested state.
 *
 * A reference is taken on the object before signaling; presumably the
 * cam_sync_signal() UMD path releases it — TODO confirm against
 * cam_sync_signal()'s reference handling.
 */
static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state,
		CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT);
}
  619. static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
  620. {
  621. struct cam_sync_merge sync_merge;
  622. uint32_t *sync_objs;
  623. uint32_t num_objs;
  624. uint32_t size;
  625. int result;
  626. if (k_ioctl->size != sizeof(struct cam_sync_merge))
  627. return -EINVAL;
  628. if (!k_ioctl->ioctl_ptr)
  629. return -EINVAL;
  630. if (copy_from_user(&sync_merge,
  631. u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size))
  632. return -EFAULT;
  633. if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
  634. return -EINVAL;
  635. size = sizeof(uint32_t) * sync_merge.num_objs;
  636. sync_objs = kzalloc(size, GFP_ATOMIC);
  637. if (!sync_objs)
  638. return -ENOMEM;
  639. if (copy_from_user(sync_objs,
  640. u64_to_user_ptr(sync_merge.sync_objs),
  641. sizeof(uint32_t) * sync_merge.num_objs)) {
  642. kfree(sync_objs);
  643. return -EFAULT;
  644. }
  645. num_objs = sync_merge.num_objs;
  646. result = cam_sync_merge(sync_objs,
  647. num_objs, &sync_merge.merged);
  648. if (!result)
  649. if (copy_to_user(
  650. u64_to_user_ptr(k_ioctl->ioctl_ptr),
  651. &sync_merge, k_ioctl->size)) {
  652. kfree(sync_objs);
  653. return -EFAULT;
  654. }
  655. kfree(sync_objs);
  656. return result;
  657. }
  658. static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
  659. {
  660. struct cam_sync_wait sync_wait;
  661. if (k_ioctl->size != sizeof(struct cam_sync_wait))
  662. return -EINVAL;
  663. if (!k_ioctl->ioctl_ptr)
  664. return -EINVAL;
  665. if (copy_from_user(&sync_wait,
  666. u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size))
  667. return -EFAULT;
  668. k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
  669. sync_wait.timeout_ms);
  670. return 0;
  671. }
  672. static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
  673. {
  674. struct cam_sync_info sync_create;
  675. if (k_ioctl->size != sizeof(struct cam_sync_info))
  676. return -EINVAL;
  677. if (!k_ioctl->ioctl_ptr)
  678. return -EINVAL;
  679. if (copy_from_user(&sync_create,
  680. u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size))
  681. return -EFAULT;
  682. return cam_sync_destroy(sync_create.sync_obj);
  683. }
/*
 * Handle the CAM_SYNC register-payload private ioctl: attach a
 * user-supplied payload to a sync object so it is delivered via a v4l2
 * event when the object signals.
 *
 * If the object has already signaled, the event is dispatched immediately
 * and nothing is queued. Registering an identical payload (first two
 * words match) twice returns -EALREADY.
 *
 * NOTE(review): the memcpy below copies CAM_SYNC_PAYLOAD_WORDS words
 * while the immediate-dispatch path sends CAM_SYNC_USER_PAYLOAD_SIZE
 * words — presumably these constants are equal; confirm in the uapi
 * header.
 */
static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	/* Allocate before taking the row lock; GFP_KERNEL may sleep */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name, sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	/* Already signaled: dispatch the event right away instead of queuing */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
			&cam_sync_monitor_mask))
			cam_generic_fence_update_monitor_array(sync_obj,
				&sync_dev->table_lock, sync_dev->mon_data,
				CAM_FENCE_OP_SKIP_REGISTER_CB);
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj, row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64),
			CAM_SYNC_COMMON_REG_PAYLOAD_EVENT);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	/* Reject duplicate registrations (first two payload words match) */
	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel, &row->user_payload_list, list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
				&cam_sync_monitor_mask))
				cam_generic_fence_update_monitor_array(sync_obj,
					&sync_dev->table_lock, sync_dev->mon_data,
					CAM_FENCE_OP_ALREADY_REGISTERED_CB);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);

	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
		cam_generic_fence_update_monitor_array(sync_obj,
			&sync_dev->table_lock, sync_dev->mon_data,
			CAM_FENCE_OP_REGISTER_CB);

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * Handle the CAM_SYNC deregister-payload private ioctl: remove a
 * previously registered user payload (matched on its first two words)
 * from a sync object's payload list.
 *
 * All matching entries are removed; the absence of a match is not
 * treated as an error.
 */
static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %s[%d]",
			row->name,
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	/* _safe iteration: entries may be deleted while walking the list */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
				&cam_sync_monitor_mask))
				cam_generic_fence_update_monitor_array(sync_obj,
					&sync_dev->table_lock, sync_dev->mon_data,
					CAM_FENCE_OP_UNREGISTER_CB);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
/*
 * Callback invoked when an associated dma fence signals; propagates the
 * dma fence status onto the camera sync object.
 *
 * @sync_obj:        Sync object bound to the signaled dma fence.
 * @signal_sync_obj: Signal info (fd and status) from the dma fence layer.
 *
 * Maps a negative dma fence status to SIGNALED_CANCEL (for -ECANCELED)
 * or SIGNALED_ERROR, then dispatches registered callbacks and signals
 * any parent (merged) objects after dropping the row lock.
 */
static int cam_sync_dma_fence_cb(
	int32_t sync_obj,
	struct cam_dma_fence_signal_sync_obj *signal_sync_obj)
{
	int32_t rc;
	int32_t status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
	struct sync_table_row *row = NULL;
	struct list_head parents_list;

	if (!signal_sync_obj) {
		CAM_ERR(CAM_SYNC, "Invalid signal info args");
		return -EINVAL;
	}

	/* Validate sync object range */
	if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
		CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
		return -EINVAL;
	}

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	/* Validate if sync obj has a dma fence association */
	if (!test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
		CAM_ERR(CAM_SYNC,
			"sync obj = %d[%s] has no associated dma fence ext_fence_mask = 0x%x",
			sync_obj, row->name, row->ext_fence_mask);
		rc = -EINVAL;
		goto end;
	}

	/* Validate if we are signaling the right sync obj based on dma fence fd */
	if (row->dma_fence_info.dma_fence_fd != signal_sync_obj->fd) {
		CAM_ERR(CAM_SYNC,
			"sync obj: %d[%s] is associated with a different fd: %d, signaling for fd: %d",
			sync_obj, row->name, row->dma_fence_info.dma_fence_fd, signal_sync_obj->fd);
		rc = -EINVAL;
		goto end;
	}

	/* Check for error status */
	if (signal_sync_obj->status < 0) {
		if (signal_sync_obj->status == -ECANCELED)
			status = CAM_SYNC_STATE_SIGNALED_CANCEL;
		else
			status = CAM_SYNC_STATE_SIGNALED_ERROR;
	}

	rc = cam_sync_signal_validate_util(sync_obj, status);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d",
			sync_obj, row->name, status, rc);
		goto end;
	}

	/* Adding dma fence reference on sync */
	atomic_inc(&row->ref_cnt);

	/* Other holders still reference the object; defer the actual signal */
	if (!atomic_dec_and_test(&row->ref_cnt))
		goto end;

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status, 0);

	/* Detach parents under the lock; signal them after dropping it */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	cam_sync_signal_parent_util(status, 0x0, &parents_list);
	return 0;

end:
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
  876. #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
/*
 * Callback invoked when an associated synx object signals; propagates
 * the synx status onto the camera sync object.
 *
 * Validates the association (ext_fence_mask and synx handle match),
 * then signals the sync object and any parent (merged) objects after
 * dropping the row lock.
 *
 * NOTE(review): unlike the dma fence callback, the incoming status is
 * stored into row->state as-is — presumably the synx layer already maps
 * to CAM_SYNC_STATE_* values; confirm in the cam_synx_obj layer.
 */
static int cam_sync_synx_obj_cb(int32_t sync_obj,
	struct cam_synx_obj_signal_sync_obj *signal_sync_obj)
{
	int32_t rc;
	struct sync_table_row *row = NULL;
	struct list_head parents_list;

	if (!signal_sync_obj) {
		CAM_ERR(CAM_SYNC, "Invalid signal info args");
		return -EINVAL;
	}

	/* Validate sync object range */
	if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
		CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
		return -EINVAL;
	}

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	/* Validate if sync obj has a synx obj association */
	if (!test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) {
		CAM_ERR(CAM_SYNC,
			"sync obj = %d[%s] has no associated synx obj ext_fence_mask = 0x%x",
			sync_obj, row->name, row->ext_fence_mask);
		rc = -EINVAL;
		goto end;
	}

	/* Validate if we are signaling the right sync obj based on synx handle */
	if (row->synx_obj_info.synx_obj != signal_sync_obj->synx_obj) {
		CAM_ERR(CAM_SYNC,
			"sync obj: %d[%s] is associated with a different synx obj: %d, signaling for synx obj: %d",
			sync_obj, row->name, row->synx_obj_info.synx_obj,
			signal_sync_obj->synx_obj);
		rc = -EINVAL;
		goto end;
	}

	rc = cam_sync_signal_validate_util(sync_obj, signal_sync_obj->status);
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d",
			sync_obj, row->name, signal_sync_obj->status, rc);
		goto end;
	}

	/* Adding synx reference on sync */
	atomic_inc(&row->ref_cnt);
	if (!atomic_dec_and_test(&row->ref_cnt)) {
		CAM_DBG(CAM_SYNC, "Sync = %d[%s] fence still has references, synx_hdl = %d",
			sync_obj, row->name, signal_sync_obj->synx_obj);
		goto end;
	}

	row->state = signal_sync_obj->status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, signal_sync_obj->status, 0);

	/* Detach parents under the lock; signal them after dropping it */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	cam_sync_signal_parent_util(signal_sync_obj->status, 0x0, &parents_list);
	CAM_DBG(CAM_SYNC,
		"Successfully signaled sync obj = %d with status = %d via synx obj = %d signal callback",
		sync_obj, signal_sync_obj->status, signal_sync_obj->synx_obj);
	return 0;

end:
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return rc;
}
  941. #endif
  942. static int cam_generic_fence_alloc_validate_input_info_util(
  943. struct cam_generic_fence_cmd_args *fence_cmd_args,
  944. struct cam_generic_fence_input_info **fence_input_info)
  945. {
  946. int rc = 0;
  947. struct cam_generic_fence_input_info *fence_input = NULL;
  948. uint32_t num_fences;
  949. size_t expected_size;
  950. *fence_input_info = NULL;
  951. if (fence_cmd_args->input_data_size <
  952. sizeof(struct cam_generic_fence_input_info)) {
  953. CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx",
  954. sizeof(struct cam_generic_fence_input_info),
  955. fence_cmd_args->input_data_size);
  956. return -EINVAL;
  957. }
  958. fence_input = memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle),
  959. fence_cmd_args->input_data_size);
  960. if (IS_ERR_OR_NULL(fence_input)) {
  961. CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
  962. fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
  963. return -ENOMEM;
  964. }
  965. /* Validate num fences */
  966. num_fences = fence_input->num_fences_requested;
  967. if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) {
  968. CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching",
  969. num_fences);
  970. rc = -EINVAL;
  971. goto free_mem;
  972. }
  973. /* Validate sizes */
  974. expected_size = sizeof(struct cam_generic_fence_input_info) +
  975. ((num_fences - 1) * sizeof(struct cam_generic_fence_config));
  976. if ((uint32_t)expected_size != fence_cmd_args->input_data_size) {
  977. CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u",
  978. expected_size, fence_cmd_args->input_data_size, num_fences);
  979. rc = -EINVAL;
  980. goto free_mem;
  981. }
  982. *fence_input_info = fence_input;
  983. return rc;
  984. free_mem:
  985. kfree(fence_input);
  986. return rc;
  987. }
  988. static void cam_generic_fence_free_input_info_util(
  989. struct cam_generic_fence_input_info **fence_input_info)
  990. {
  991. struct cam_generic_fence_input_info *fence_input = *fence_input_info;
  992. kfree(fence_input);
  993. *fence_input_info = NULL;
  994. }
/*
 * Handle the generic-fence CREATE command for dma fences: create one fd
 * per requested config and report per-fence results (reason_code) back
 * to user space. Processing stops at the first creation failure.
 */
static int cam_generic_fence_handle_dma_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i, dma_fence_row_idx;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
			&dma_fence_row_idx, fence_cfg->name);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to create dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			/* Abort the batch but still report partial results */
			goto out_copy;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"Created dma_fence @ i: %d fence fd: %d[%s] num fences [requested: %u processed: %u] ",
			i, fence_cfg->dma_fence_fd, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	/* Results (fds / reason codes) go back in the same user buffer */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
/*
 * Handle the generic-fence RELEASE command for dma fences: release each
 * fd in the batch. Unlike create, a failure does not abort the loop;
 * remaining fences are still released and the call returns -ENOMSG with
 * per-fence reason codes reported to user space.
 */
static int cam_generic_fence_handle_dma_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i;
	bool failed = false;
	struct cam_dma_fence_release_params release_params;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		/* Release by fd, not by internal row index */
		release_params.use_row_idx = false;
		release_params.u.dma_fence_fd = fence_cfg->dma_fence_fd;

		rc = cam_dma_fence_release(&release_params);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to destroy dma fence at index: %d fd: %d rc: %d num fences [requested: %u processed: %u]",
				i, fence_cfg->dma_fence_fd, rc,
				fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			/* Continue to release other fences, but mark the call as failed */
			failed = true;
			continue;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"Released dma_fence @ i: %d fd: %d num fences [requested: %u processed: %u]",
			i, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	/* Per-fence reason codes go back in the same user buffer */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
/*
 * Handle the generic-fence IMPORT command for dma fences: for each fd,
 * look up the dma fence, create a sync object bound to it, and register
 * a signal callback. The batch is aborted on the first failure; partial
 * results are still copied back with per-fence reason codes.
 */
static int cam_generic_fence_handle_dma_import(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int32_t rc, i, dma_fence_row_idx;
	struct dma_fence *fence = NULL;
	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_DMA_FENCE,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;

		/* Check if fd is for a valid dma fence */
		fence = cam_dma_fence_get_fence_from_fd(fence_cfg->dma_fence_fd,
			&dma_fence_row_idx);
		if (IS_ERR_OR_NULL(fence)) {
			CAM_ERR(CAM_DMA_FENCE,
				"Invalid dma fence for fd: %d", fence_cfg->dma_fence_fd);
			fence_cfg->reason_code = -EINVAL;
			goto out_copy;
		}

		dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
		dma_sync_create.fd = fence_cfg->dma_fence_fd;
		/* Sync obj is attached to an existing dma fence, not vice versa */
		dma_sync_create.sync_created_with_dma = false;

		/* Create new sync object and associate dma fence */
		rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
			&dma_sync_create, NULL);
		if (rc) {
			fence_cfg->reason_code = rc;
			/* put on the import refcnt */
			cam_dma_fence_get_put_ref(false, dma_fence_row_idx);
			goto out_copy;
		}

		/* Register a cb for dma fence */
		rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
			&dma_fence_row_idx, cam_sync_dma_fence_cb);
		if (rc) {
			CAM_ERR(CAM_DMA_FENCE,
				"Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
				fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);
			/* Roll back the sync obj created just above */
			cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
				NULL, NULL);
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		CAM_DBG(CAM_DMA_FENCE,
			"dma fence fd = %d imported for sync_obj = %d[%s] num fences [requested: %u processed: %u]",
			fence_cfg->dma_fence_fd, fence_cfg->sync_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	/* Sync handles / reason codes go back in the same user buffer */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
  1156. static int cam_generic_fence_handle_dma_signal(
  1157. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1158. {
  1159. struct cam_dma_fence_signal signal_dma_fence;
  1160. if (fence_cmd_args->input_data_size != sizeof(struct cam_dma_fence_signal)) {
  1161. CAM_ERR(CAM_DMA_FENCE, "Size is invalid expected: 0x%llx actual: 0x%llx",
  1162. sizeof(struct cam_dma_fence_signal),
  1163. fence_cmd_args->input_data_size);
  1164. return -EINVAL;
  1165. }
  1166. if (copy_from_user(&signal_dma_fence, (void __user *)fence_cmd_args->input_handle,
  1167. fence_cmd_args->input_data_size))
  1168. return -EFAULT;
  1169. return cam_dma_fence_signal_fd(&signal_dma_fence);
  1170. }
  1171. static int cam_generic_fence_process_dma_fence_cmd(
  1172. uint32_t id,
  1173. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1174. {
  1175. int rc = -EINVAL;
  1176. switch (id) {
  1177. case CAM_GENERIC_FENCE_CREATE:
  1178. rc = cam_generic_fence_handle_dma_create(fence_cmd_args);
  1179. break;
  1180. case CAM_GENERIC_FENCE_RELEASE:
  1181. rc = cam_generic_fence_handle_dma_release(fence_cmd_args);
  1182. break;
  1183. case CAM_GENERIC_FENCE_IMPORT:
  1184. rc = cam_generic_fence_handle_dma_import(fence_cmd_args);
  1185. break;
  1186. case CAM_GENERIC_FENCE_SIGNAL:
  1187. rc = cam_generic_fence_handle_dma_signal(fence_cmd_args);
  1188. break;
  1189. default:
  1190. CAM_ERR(CAM_DMA_FENCE, "IOCTL cmd: %u not supported for dma fence", id);
  1191. break;
  1192. }
  1193. return rc;
  1194. }
  1195. int cam_sync_synx_core_recovery(
  1196. enum cam_sync_synx_supported_cores core_id)
  1197. {
  1198. int rc = -EOPNOTSUPP;
  1199. #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
  1200. rc = cam_synx_core_recovery(core_id);
  1201. #endif
  1202. return rc;
  1203. }
  1204. #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
  1205. static int cam_generic_fence_validate_signal_input_info_util(
  1206. int32_t fence_type,
  1207. struct cam_generic_fence_cmd_args *fence_cmd_args,
  1208. struct cam_generic_fence_signal_info **fence_signal_info,
  1209. void **fence_signal_data)
  1210. {
  1211. int rc = 0;
  1212. struct cam_generic_fence_signal_info *signal_info = NULL;
  1213. void *signal_data;
  1214. uint32_t num_fences;
  1215. size_t expected_size;
  1216. *fence_signal_info = NULL;
  1217. *fence_signal_data = NULL;
  1218. if (fence_cmd_args->input_data_size !=
  1219. sizeof(struct cam_generic_fence_signal_info)) {
  1220. CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx",
  1221. sizeof(struct cam_generic_fence_signal_info),
  1222. fence_cmd_args->input_data_size);
  1223. return -EINVAL;
  1224. }
  1225. signal_info = memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle),
  1226. fence_cmd_args->input_data_size);
  1227. if (IS_ERR_OR_NULL(signal_info)) {
  1228. CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
  1229. fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
  1230. return -ENOMEM;
  1231. }
  1232. /* Validate num fences */
  1233. num_fences = signal_info->num_fences_requested;
  1234. if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) {
  1235. CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching",
  1236. num_fences);
  1237. rc = -EINVAL;
  1238. goto free_mem;
  1239. }
  1240. if (signal_info->fence_handle_type != CAM_HANDLE_USER_POINTER) {
  1241. CAM_ERR(CAM_SYNC, "Invalid signal handle type: %d",
  1242. signal_info->fence_handle_type);
  1243. rc = -EINVAL;
  1244. goto free_mem;
  1245. }
  1246. /* Validate sizes */
  1247. switch (fence_type) {
  1248. case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
  1249. expected_size = sizeof(struct cam_sync_signal);
  1250. break;
  1251. case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
  1252. expected_size = sizeof(struct cam_synx_obj_signal);
  1253. break;
  1254. case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
  1255. expected_size = sizeof(struct cam_dma_fence_signal);
  1256. break;
  1257. default:
  1258. CAM_ERR(CAM_SYNC, "Unsupported fence type: %u", fence_type);
  1259. rc = -EINVAL;
  1260. goto free_mem;
  1261. }
  1262. if ((signal_info->fence_data_size) != (expected_size * num_fences)) {
  1263. CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u",
  1264. (expected_size * num_fences), signal_info->fence_data_size, num_fences);
  1265. rc = -EINVAL;
  1266. goto free_mem;
  1267. }
  1268. signal_data = memdup_user(u64_to_user_ptr(signal_info->fence_info_hdl),
  1269. signal_info->fence_data_size);
  1270. if (IS_ERR_OR_NULL(signal_data)) {
  1271. CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x",
  1272. signal_info->fence_info_hdl, signal_info->fence_data_size);
  1273. rc = -ENOMEM;
  1274. goto free_mem;
  1275. }
  1276. *fence_signal_info = signal_info;
  1277. *fence_signal_data = signal_data;
  1278. return rc;
  1279. free_mem:
  1280. kfree(signal_info);
  1281. return rc;
  1282. }
  1283. static void cam_generic_fence_free_signal_input_info_util(
  1284. struct cam_generic_fence_signal_info **fence_signal_info,
  1285. void **fence_signal_data)
  1286. {
  1287. void *signal_data = *fence_signal_data;
  1288. struct cam_generic_fence_signal_info *fence_input = *fence_signal_info;
  1289. kfree(signal_data);
  1290. kfree(fence_input);
  1291. *fence_signal_info = NULL;
  1292. *fence_signal_data = NULL;
  1293. }
  1294. static int cam_generic_fence_config_parse_params(
  1295. struct cam_generic_fence_config *fence_cfg,
  1296. int32_t requested_param_mask, int32_t *result)
  1297. {
  1298. uint32_t index = 0, num_entries;
  1299. if (!result) {
  1300. CAM_ERR(CAM_SYNC, "Invalid result hdl : %p", result);
  1301. return -EINVAL;
  1302. }
  1303. /* Assign to 0 by default */
  1304. *result = 0;
  1305. if (!fence_cfg->num_valid_params || !requested_param_mask) {
  1306. CAM_DBG(CAM_SYNC,
  1307. "No params configured num_valid = %d requested_mask = 0x%x",
  1308. fence_cfg->num_valid_params, requested_param_mask);
  1309. return 0;
  1310. }
  1311. if (!(fence_cfg->valid_param_mask & requested_param_mask)) {
  1312. CAM_DBG(CAM_SYNC,
  1313. "Requested parameter not set in additional param mask expecting: 0x%x actual: 0x%x",
  1314. requested_param_mask, fence_cfg->valid_param_mask);
  1315. return 0;
  1316. }
  1317. index = ffs(requested_param_mask) - 1;
  1318. num_entries = ARRAY_SIZE(fence_cfg->params);
  1319. if (index >= num_entries) {
  1320. CAM_DBG(CAM_SYNC,
  1321. "Obtained index %u from mask: 0x%x num_param_entries: %u, index exceeding max",
  1322. index, requested_param_mask, num_entries);
  1323. return 0;
  1324. }
  1325. *result = fence_cfg->params[index];
  1326. return 0;
  1327. }
/*
 * Handle the generic-fence CREATE command for synx objects: create one
 * synx handle per requested config (honoring an optional flags param)
 * and report per-fence results back to user space. Processing stops at
 * the first creation failure.
 */
static int cam_generic_fence_handle_synx_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i;
	int32_t row_idx, fence_flag;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;
		fence_flag = 0;

		/* Optional per-fence flags parameter; stays 0 when absent */
		cam_generic_fence_config_parse_params(fence_cfg,
			CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag);
		rc = cam_synx_obj_create(fence_cfg->name,
			fence_flag, &fence_cfg->synx_obj, &row_idx);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Failed to create synx fence at index: %d rc: %d num fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		CAM_DBG(CAM_SYNX,
			"Created synx fence @ i: %d synx_obj: %d[%s] num fences [requested: %u processed: %u] ",
			i, fence_cfg->synx_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	/* Handles / reason codes go back in the same user buffer */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
		rc = -EFAULT;
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
  1375. static int cam_generic_fence_handle_synx_release(
  1376. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1377. {
  1378. int rc, i;
  1379. bool failed = false;
  1380. struct cam_generic_fence_input_info *fence_input_info = NULL;
  1381. struct cam_generic_fence_config *fence_cfg = NULL;
  1382. struct cam_synx_obj_release_params synx_release_params;
  1383. rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
  1384. if (rc || !fence_input_info) {
  1385. CAM_ERR(CAM_SYNX,
  1386. "Fence input info validation failed rc: %d fence_input_info: %pK",
  1387. rc, fence_input_info);
  1388. return -EINVAL;
  1389. }
  1390. for (i = 0; i < fence_input_info->num_fences_requested; i++) {
  1391. fence_cfg = &fence_input_info->fence_cfg[i];
  1392. fence_input_info->num_fences_processed++;
  1393. fence_cfg->reason_code = 0;
  1394. synx_release_params.use_row_idx = false;
  1395. synx_release_params.u.synx_obj = fence_cfg->synx_obj;
  1396. rc = cam_synx_obj_release(&synx_release_params);
  1397. if (rc) {
  1398. CAM_ERR(CAM_SYNX,
  1399. "Failed to release synx object at index: %d rc: %d num fences [requested: %u processed: %u]",
  1400. i, rc, fence_input_info->num_fences_requested,
  1401. fence_input_info->num_fences_processed);
  1402. fence_cfg->reason_code = rc;
  1403. /* Continue to release other fences, but mark the call as failed */
  1404. failed = true;
  1405. continue;
  1406. }
  1407. CAM_DBG(CAM_SYNX,
  1408. "Released synx object @ i: %d handle: %d num fences [requested: %u processed: %u]",
  1409. i, fence_cfg->synx_obj,
  1410. fence_input_info->num_fences_requested,
  1411. fence_input_info->num_fences_processed);
  1412. }
  1413. if (failed)
  1414. rc = -ENOMSG;
  1415. if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
  1416. fence_input_info, fence_cmd_args->input_data_size)) {
  1417. CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
  1418. fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
  1419. rc = -EFAULT;
  1420. }
  1421. cam_generic_fence_free_input_info_util(&fence_input_info);
  1422. return rc;
  1423. }
  1424. static int cam_sync_synx_associate_obj(int32_t sync_obj, uint32_t synx_obj,
  1425. int32_t synx_obj_row_idx, bool *is_sync_obj_signaled)
  1426. {
  1427. int rc;
  1428. struct sync_table_row *row = NULL;
  1429. struct cam_synx_obj_signal signal_synx_obj;
  1430. rc = cam_sync_check_valid(sync_obj);
  1431. if (rc)
  1432. return rc;
  1433. row = sync_dev->sync_table + sync_obj;
  1434. spin_lock(&sync_dev->row_spinlocks[sync_obj]);
  1435. if (row->state != CAM_SYNC_STATE_ACTIVE) {
  1436. signal_synx_obj.status = row->state;
  1437. signal_synx_obj.synx_obj = synx_obj;
  1438. *is_sync_obj_signaled = true;
  1439. goto signal_synx;
  1440. } else {
  1441. row->synx_obj_info.synx_obj_row_idx = synx_obj_row_idx;
  1442. row->synx_obj_info.sync_created_with_synx = false;
  1443. row->synx_obj_info.synx_obj = synx_obj;
  1444. set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask);
  1445. CAM_DBG(CAM_SYNX, "sync_obj: %s[%d] associated with synx_obj: %d",
  1446. row->name, sync_obj, row->synx_obj_info.synx_obj);
  1447. }
  1448. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  1449. return rc;
  1450. signal_synx:
  1451. spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
  1452. return cam_synx_obj_signal_obj(&signal_synx_obj);
  1453. }
/*
 * cam_generic_fence_handle_synx_import() - import externally created synx
 * objects into the camera sync framework.
 *
 * For each entry: validates the synx handle against the synx table, then
 * either associates it with an existing sync object (sync_obj in valid
 * range) or creates a brand new sync object bound to it. Unless the sync
 * object was already signaled at association time, a synx callback is
 * registered so synx signals propagate to the sync object. The loop stops
 * at the first failure; results are always copied back to userspace.
 */
static int cam_generic_fence_handle_synx_import(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int32_t rc, i, synx_obj_row_idx;
	struct sync_synx_obj_info synx_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	bool is_sync_obj_signaled = false;
	bool is_sync_obj_created = false;

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNX,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;
		/* Reset per-entry state from the previous iteration */
		is_sync_obj_signaled = false;
		is_sync_obj_created = false;

		/* Check if synx handle is for a valid synx obj */
		rc = cam_synx_obj_find_obj_in_table(fence_cfg->synx_obj,
			&synx_obj_row_idx);
		if (rc) {
			CAM_ERR(CAM_SYNX,
				"Invalid synx obj for handle: %d", fence_cfg->synx_obj);
			fence_cfg->reason_code = -EINVAL;
			goto out_copy;
		}

		if ((fence_cfg->sync_obj > 0) && (fence_cfg->sync_obj < CAM_SYNC_MAX_OBJS)) {
			/* Associate synx object with existing sync object */
			rc = cam_sync_synx_associate_obj(fence_cfg->sync_obj,
				fence_cfg->synx_obj, synx_obj_row_idx,
				&is_sync_obj_signaled);
		} else {
			/* Create new sync object and associate synx object */
			synx_sync_create.sync_created_with_synx = false;
			synx_sync_create.synx_obj = fence_cfg->synx_obj;
			synx_sync_create.synx_obj_row_idx = synx_obj_row_idx;

			rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
				NULL, &synx_sync_create);
			is_sync_obj_created = true;
		}

		if (rc) {
			fence_cfg->reason_code = rc;
			goto out_copy;
		}

		/* If association found the sync obj already signaled, the
		 * synx obj was signaled there instead - no callback needed.
		 */
		if (!is_sync_obj_signaled) {
			/* Register a cb for synx_obj */
			rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj,
				synx_obj_row_idx, cam_sync_synx_obj_cb);
			if (rc) {
				CAM_ERR(CAM_SYNX,
					"Failed to register cb for synx_obj: %d sync_obj: %d rc: %d",
					fence_cfg->synx_obj, fence_cfg->sync_obj, rc);
				/* Roll back only objects created here; a
				 * pre-existing sync obj is left untouched.
				 */
				if (is_sync_obj_created)
					cam_sync_deinit_object(sync_dev->sync_table,
						fence_cfg->sync_obj, NULL, NULL);
				fence_cfg->reason_code = rc;
				goto out_copy;
			}
		}

		CAM_DBG(CAM_SYNX,
			"synx_obj handle = %d imported for dma fence fd: %d sync_obj = %d[%s] num fences [requested: %u processed: %u]",
			fence_cfg->synx_obj, fence_cfg->dma_fence_fd,
			fence_cfg->sync_obj, fence_cfg->name,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	/* Always report per-fence results back to user */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
  1535. static int cam_generic_fence_handle_synx_signal(
  1536. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1537. {
  1538. int32_t rc, i;
  1539. struct cam_generic_fence_signal_info *fence_signal_info;
  1540. struct cam_synx_obj_signal *synx_signal_info;
  1541. rc = cam_generic_fence_validate_signal_input_info_util(
  1542. CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, fence_cmd_args,
  1543. &fence_signal_info, (void **)&synx_signal_info);
  1544. if (rc || !fence_signal_info || !synx_signal_info) {
  1545. CAM_ERR(CAM_SYNX,
  1546. "Fence input signal info validation failed rc: %d fence_input_info: %pK synx_signal_info: %pK",
  1547. rc, fence_signal_info, synx_signal_info);
  1548. return -EINVAL;
  1549. }
  1550. for (i = 0; i < fence_signal_info->num_fences_requested; i++) {
  1551. fence_signal_info->num_fences_processed++;
  1552. rc = cam_synx_obj_signal_obj(&synx_signal_info[i]);
  1553. if (rc) {
  1554. CAM_ERR(CAM_SYNX,
  1555. "Failed to signal for synx_obj: %d, rc: %d, status : %d",
  1556. synx_signal_info[i].synx_obj, rc,
  1557. synx_signal_info[i].status);
  1558. }
  1559. synx_signal_info[i].reason_code = rc;
  1560. }
  1561. if (copy_to_user(u64_to_user_ptr(fence_signal_info->fence_info_hdl), synx_signal_info,
  1562. fence_signal_info->fence_data_size)) {
  1563. rc = -EFAULT;
  1564. CAM_ERR(CAM_SYNX, "copy to user for signal data failed hdl: %d size: 0x%x",
  1565. fence_cmd_args->input_handle,
  1566. (sizeof(struct cam_synx_obj_signal) *
  1567. fence_signal_info->num_fences_requested));
  1568. goto end;
  1569. }
  1570. if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
  1571. fence_signal_info, sizeof(struct cam_generic_fence_signal_info))) {
  1572. rc = -EFAULT;
  1573. CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x",
  1574. fence_cmd_args->input_handle,
  1575. sizeof(struct cam_generic_fence_signal_info));
  1576. }
  1577. end:
  1578. cam_generic_fence_free_signal_input_info_util(&fence_signal_info,
  1579. (void **)&synx_signal_info);
  1580. return rc;
  1581. }
  1582. static int cam_generic_fence_process_synx_obj_cmd(
  1583. uint32_t id,
  1584. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1585. {
  1586. int rc = -EINVAL;
  1587. switch (id) {
  1588. case CAM_GENERIC_FENCE_CREATE:
  1589. rc = cam_generic_fence_handle_synx_create(fence_cmd_args);
  1590. break;
  1591. case CAM_GENERIC_FENCE_RELEASE:
  1592. rc = cam_generic_fence_handle_synx_release(fence_cmd_args);
  1593. break;
  1594. case CAM_GENERIC_FENCE_IMPORT:
  1595. rc = cam_generic_fence_handle_synx_import(fence_cmd_args);
  1596. break;
  1597. case CAM_GENERIC_FENCE_SIGNAL:
  1598. rc = cam_generic_fence_handle_synx_signal(fence_cmd_args);
  1599. break;
  1600. default:
  1601. CAM_ERR(CAM_SYNX, "IOCTL cmd: %u not supported for synx object", id);
  1602. break;
  1603. }
  1604. return rc;
  1605. }
  1606. #endif
/*
 * cam_generic_fence_handle_sync_create() - create sync objects, optionally
 * backed by a dma fence and/or a synx object, as selected per entry by
 * fence_sel_mask.
 *
 * Per entry the sequence is:
 *   1. DMA bit set: create a dma fence and fd.
 *   2. SYNX bit set (when enabled): import the dma fence just created into
 *      a new synx object, or create a standalone synx object.
 *   3. Create the sync object linked to whichever backing fences exist.
 *   4. Register the dma fence and synx callbacks so external signals
 *      propagate to the sync object.
 * On any failure, objects already created for that entry are rolled back,
 * the loop stops, and results are copied back to userspace.
 */
static int cam_generic_fence_handle_sync_create(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	int rc, i, dma_fence_row_idx;
	bool dma_fence_created;
	unsigned long fence_sel_mask;
	struct cam_dma_fence_release_params release_params;
	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	bool synx_obj_created = false;
	struct sync_synx_obj_info synx_obj_create;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	int32_t fence_flag;
	int32_t synx_obj_row_idx = 0;
	struct cam_synx_obj_release_params synx_release_params;
	struct dma_fence *dma_fence_ptr;
#endif

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		fence_cfg->reason_code = 0;
		/* Reset flag */
		dma_fence_created = false;
		synx_obj_created = false;
		fence_sel_mask = fence_cfg->fence_sel_mask;

		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd,
				&dma_fence_row_idx, fence_cfg->name);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to create dma fence at index: %d rc: %d num_fences: %u",
					i, rc, fence_input_info->num_fences_requested);
				fence_cfg->reason_code = rc;
				goto out_copy;
			}

			dma_sync_create.dma_fence_row_idx = dma_fence_row_idx;
			dma_sync_create.fd = fence_cfg->dma_fence_fd;
			dma_sync_create.sync_created_with_dma = true;
			dma_fence_created = true;
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Create a synx object */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			if (dma_fence_created) {
				/* NOTE(review): dma_fence_ptr is not checked
				 * for NULL before use, and any reference it
				 * carries does not appear to be dropped here -
				 * confirm against the helper's contract.
				 */
				dma_fence_ptr = cam_dma_fence_get_fence_from_fd(
					dma_sync_create.fd, &dma_fence_row_idx);
				rc = cam_synx_obj_import_dma_fence(fence_cfg->name,
					fence_cfg->params[0], dma_fence_ptr,
					&fence_cfg->synx_obj, &synx_obj_row_idx);
			} else {
				cam_generic_fence_config_parse_params(fence_cfg,
					CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag);
				rc = cam_synx_obj_create(fence_cfg->name,
					fence_flag, &fence_cfg->synx_obj,
					&synx_obj_row_idx);
			}
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to create/import synx obj at index: %d rc: %d num_fences: %u",
					i, rc, fence_input_info->num_fences_requested);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;

					cam_dma_fence_release(&release_params);
				}
				/* Release synx obj */
				/* NOTE(review): synx_obj_created is still
				 * false on this path, so this branch never
				 * runs here - apparent dead code; confirm.
				 */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;

					cam_synx_obj_release(&synx_release_params);
				}

				goto out_copy;
			}

			synx_obj_create.sync_created_with_synx = true;
			synx_obj_create.synx_obj = fence_cfg->synx_obj;
			synx_obj_create.synx_obj_row_idx = synx_obj_row_idx;
			synx_obj_created = true;
		}
#endif
		/* Create the sync object bound to the backing fences, if any */
		rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name,
			(dma_fence_created ? &dma_sync_create : NULL),
			(synx_obj_created ? &synx_obj_create : NULL));
		if (rc) {
			fence_cfg->reason_code = rc;
			CAM_ERR(CAM_SYNC,
				"Failed to create sync obj at index: %d rc: %d num_fences: %u",
				i, rc, fence_input_info->num_fences_requested);

			/* Release dma fence */
			if (dma_fence_created) {
				release_params.use_row_idx = true;
				release_params.u.dma_row_idx = dma_fence_row_idx;

				cam_dma_fence_release(&release_params);
			}
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
			/* Release synx obj */
			if (synx_obj_created) {
				synx_release_params.use_row_idx = true;
				synx_release_params.u.synx_row_idx = synx_obj_row_idx;

				cam_synx_obj_release(&synx_release_params);
			}
#endif
			goto out_copy;
		}

		/* Register dma fence cb */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj,
				&dma_fence_row_idx, cam_sync_dma_fence_cb);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc);
				fence_cfg->reason_code = rc;
				/* Destroy sync obj */
				cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
					NULL, NULL);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;

					cam_dma_fence_release(&release_params);
				}
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
				/* Release synx obj */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;

					cam_synx_obj_release(&synx_release_params);
				}
#endif
				goto out_copy;
			}
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Register synx object callback */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj,
				synx_obj_row_idx, cam_sync_synx_obj_cb);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to register cb for synx_obj: %d sync_obj: %d rc: %d",
					fence_cfg->synx_obj, fence_cfg->sync_obj, rc);
				fence_cfg->reason_code = rc;
				/* Destroy sync obj */
				cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
					NULL, NULL);

				/* Release dma fence */
				if (dma_fence_created) {
					release_params.use_row_idx = true;
					release_params.u.dma_row_idx = dma_fence_row_idx;

					cam_dma_fence_release(&release_params);
				}

				/* Release synx obj */
				if (synx_obj_created) {
					synx_release_params.use_row_idx = true;
					synx_release_params.u.synx_row_idx = synx_obj_row_idx;

					cam_synx_obj_release(&synx_release_params);
				}

				goto out_copy;
			}
		}
#endif
		CAM_DBG(CAM_SYNC,
			"Created sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

out_copy:
	/* Always report per-fence results back to user */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
/*
 * cam_generic_fence_handle_sync_release() - release sync objects along with
 * the dma fence / synx object they were created with.
 *
 * For each entry, the sync object is deinitialized first; the deinit
 * reports (via the check_for_*_release out-params) whether the backing
 * fences were created together with the sync object, and only then are the
 * dma fence / synx object released. Releasing a fence that was NOT created
 * together with the sync object is rejected with -EPERM. Failures are
 * per-entry: the loop continues and the call returns -ENOMSG if any entry
 * failed. Results are always copied back to userspace.
 */
static int cam_generic_fence_handle_sync_release(
	struct cam_generic_fence_cmd_args *fence_cmd_args)
{
	bool failed = false;
	int rc, i;
	unsigned long fence_sel_mask;
	struct cam_sync_check_for_dma_release check_for_dma_release;
	struct cam_dma_fence_release_params release_params;
	struct cam_generic_fence_input_info *fence_input_info = NULL;
	struct cam_generic_fence_config *fence_cfg = NULL;
	struct cam_sync_check_for_synx_release check_for_synx_release;
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	struct cam_synx_obj_release_params synx_release_params;
#endif

	rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info);
	if (rc || !fence_input_info) {
		CAM_ERR(CAM_SYNC,
			"Fence input info validation failed rc: %d fence_input_info: %pK",
			rc, fence_input_info);
		return -EINVAL;
	}

	for (i = 0; i < fence_input_info->num_fences_requested; i++) {
		fence_cfg = &fence_input_info->fence_cfg[i];
		fence_input_info->num_fences_processed++;
		/* Reset fields */
		fence_cfg->reason_code = 0;
		check_for_dma_release.sync_created_with_dma = false;
		check_for_dma_release.dma_fence_fd = fence_cfg->dma_fence_fd;
		check_for_synx_release.sync_created_with_synx = false;
		check_for_synx_release.synx_obj = fence_cfg->synx_obj;

		/* Deinit fills in whether dma/synx were co-created and their
		 * row indices, which gate the releases below.
		 */
		rc = cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj,
			&check_for_dma_release, &check_for_synx_release);
		if (rc) {
			fence_cfg->reason_code = rc;
			failed = true;
			CAM_ERR(CAM_SYNC,
				"Failed to release sync obj at index: %d rc: %d num_fences [requested: %u processed: %u]",
				i, rc, fence_input_info->num_fences_requested,
				fence_input_info->num_fences_processed);
		}

		fence_sel_mask = fence_cfg->fence_sel_mask;
		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) {
			if (!check_for_dma_release.sync_created_with_dma) {
				CAM_ERR(CAM_SYNC,
					"Failed to release dma fence fd: %d with sync_obj: %d, not created together",
					fence_cfg->dma_fence_fd, fence_cfg->sync_obj);
				failed = true;
				fence_cfg->reason_code = -EPERM;
				continue;
			}

			release_params.use_row_idx = true;
			release_params.u.dma_row_idx = check_for_dma_release.dma_fence_row_idx;

			rc = cam_dma_fence_release(&release_params);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to destroy dma fence at index: %d rc: %d num fences [requested: %u processed: %u]",
					i, rc, fence_input_info->num_fences_requested,
					fence_input_info->num_fences_processed);
				fence_cfg->reason_code = rc;
				failed = true;
				continue;
			}
		}

#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
		/* Release associated synx obj */
		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) {
			if (!check_for_synx_release.sync_created_with_synx) {
				CAM_ERR(CAM_SYNC,
					"Failed to release synx_obj: %d with sync_obj: %d, not created together",
					fence_cfg->synx_obj, fence_cfg->sync_obj);
				failed = true;
				fence_cfg->reason_code = -EPERM;
				continue;
			}

			synx_release_params.use_row_idx = true;
			synx_release_params.u.synx_row_idx =
				check_for_synx_release.synx_obj_row_idx;

			rc = cam_synx_obj_release(&synx_release_params);
			if (rc) {
				CAM_ERR(CAM_SYNC,
					"Failed to destroy synx_obj at index: %d rc: %d num fences [requested: %u processed: %u]",
					i, rc, fence_input_info->num_fences_requested,
					fence_input_info->num_fences_processed);
				fence_cfg->reason_code = rc;
				failed = true;
				continue;
			}
		}
#endif
		CAM_DBG(CAM_SYNC,
			"Released sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d synx_obj: %d num fences [requested: %u processed: %u]",
			fence_cfg->sync_obj, fence_cfg->name,
			fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd, fence_cfg->synx_obj,
			fence_input_info->num_fences_requested,
			fence_input_info->num_fences_processed);
	}

	if (failed)
		rc = -ENOMSG;

	/* Always report per-fence results back to user */
	if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle),
		fence_input_info, fence_cmd_args->input_data_size)) {
		rc = -EFAULT;
		CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x",
			fence_cmd_args->input_handle, fence_cmd_args->input_data_size);
	}

	cam_generic_fence_free_input_info_util(&fence_input_info);
	return rc;
}
  1901. static int cam_generic_fence_process_sync_obj_cmd(
  1902. uint32_t id,
  1903. struct cam_generic_fence_cmd_args *fence_cmd_args)
  1904. {
  1905. int rc = -EINVAL;
  1906. switch (id) {
  1907. case CAM_GENERIC_FENCE_CREATE:
  1908. rc = cam_generic_fence_handle_sync_create(fence_cmd_args);
  1909. break;
  1910. case CAM_GENERIC_FENCE_RELEASE:
  1911. rc = cam_generic_fence_handle_sync_release(fence_cmd_args);
  1912. break;
  1913. default:
  1914. CAM_ERR(CAM_SYNC, "IOCTL cmd: %u not supported for sync object", id);
  1915. break;
  1916. }
  1917. return rc;
  1918. }
  1919. static int cam_generic_fence_parser(
  1920. struct cam_private_ioctl_arg *k_ioctl)
  1921. {
  1922. int rc;
  1923. struct cam_generic_fence_cmd_args fence_cmd_args;
  1924. if (!k_ioctl->ioctl_ptr) {
  1925. CAM_ERR(CAM_SYNC, "Invalid args input ptr: %p",
  1926. k_ioctl->ioctl_ptr);
  1927. return -EINVAL;
  1928. }
  1929. if (k_ioctl->size != sizeof(struct cam_generic_fence_cmd_args)) {
  1930. CAM_ERR(CAM_SYNC, "Size mismatch expected: 0x%llx actual: 0x%llx",
  1931. sizeof(struct cam_generic_fence_cmd_args), k_ioctl->size);
  1932. return -EINVAL;
  1933. }
  1934. if (copy_from_user(&fence_cmd_args, u64_to_user_ptr(k_ioctl->ioctl_ptr),
  1935. sizeof(fence_cmd_args))) {
  1936. CAM_ERR(CAM_SYNC, "copy from user failed for input ptr: %pK",
  1937. k_ioctl->ioctl_ptr);
  1938. return -EFAULT;
  1939. }
  1940. if (fence_cmd_args.input_handle_type != CAM_HANDLE_USER_POINTER) {
  1941. CAM_ERR(CAM_SYNC, "Invalid handle type: %u",
  1942. fence_cmd_args.input_handle_type);
  1943. return -EINVAL;
  1944. }
  1945. switch (fence_cmd_args.fence_type) {
  1946. case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
  1947. rc = cam_generic_fence_process_sync_obj_cmd(k_ioctl->id, &fence_cmd_args);
  1948. break;
  1949. case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
  1950. rc = cam_generic_fence_process_dma_fence_cmd(k_ioctl->id, &fence_cmd_args);
  1951. break;
  1952. #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
  1953. case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
  1954. rc = cam_generic_fence_process_synx_obj_cmd(k_ioctl->id, &fence_cmd_args);
  1955. break;
  1956. #endif
  1957. default:
  1958. rc = -EINVAL;
  1959. CAM_ERR(CAM_SYNC, "fence type: 0x%x handling not supported",
  1960. fence_cmd_args.fence_type);
  1961. break;
  1962. }
  1963. return rc;
  1964. }
/*
 * cam_sync_dev_ioctl() - v4l2 private-ioctl entry point for the sync device.
 *
 * Accepts only CAM_PRIVATE_IOCTL_CMD; copies the kernel-side ioctl arg by
 * value and dispatches on its id. For CAM_SYNC_WAIT the result field is
 * written back into the caller's arg. Note the local sync_dev shadows the
 * file-scope sync_dev used elsewhere in this file.
 *
 * Returns the handler's code, -EINVAL on bad args, or -ENOIOCTLCMD for
 * unknown commands.
 */
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	/* Work on a local copy; only CAM_SYNC_WAIT writes anything back */
	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	case CAM_GENERIC_FENCE_CREATE:
	case CAM_GENERIC_FENCE_RELEASE:
	case CAM_GENERIC_FENCE_IMPORT:
	case CAM_GENERIC_FENCE_SIGNAL:
		rc = cam_generic_fence_parser(&k_ioctl);
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
  2017. static unsigned int cam_sync_poll(struct file *f,
  2018. struct poll_table_struct *pll_table)
  2019. {
  2020. int rc = 0;
  2021. struct v4l2_fh *eventq = f->private_data;
  2022. if (!eventq)
  2023. return -EINVAL;
  2024. poll_wait(f, &eventq->wait, pll_table);
  2025. if (v4l2_event_pending(eventq))
  2026. rc = POLLPRI;
  2027. return rc;
  2028. }
  2029. static int cam_sync_open(struct file *filep)
  2030. {
  2031. int rc;
  2032. struct sync_device *sync_dev = video_drvdata(filep);
  2033. if (!sync_dev) {
  2034. CAM_ERR(CAM_SYNC, "Sync device NULL");
  2035. return -ENODEV;
  2036. }
  2037. mutex_lock(&sync_dev->table_lock);
  2038. if (sync_dev->open_cnt >= 1) {
  2039. mutex_unlock(&sync_dev->table_lock);
  2040. return -EALREADY;
  2041. }
  2042. rc = v4l2_fh_open(filep);
  2043. if (!rc) {
  2044. sync_dev->open_cnt++;
  2045. #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
  2046. cam_synx_obj_open();
  2047. #endif
  2048. cam_dma_fence_open();
  2049. spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
  2050. sync_dev->cam_sync_eventq = filep->private_data;
  2051. spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
  2052. } else {
  2053. CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
  2054. }
  2055. if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
  2056. sync_dev->mon_data = kzalloc(
  2057. sizeof(struct cam_generic_fence_monitor_data *) *
  2058. CAM_SYNC_MONITOR_TABLE_SIZE, GFP_KERNEL);
  2059. if (!sync_dev->mon_data) {
  2060. CAM_WARN(CAM_SYNC, "Failed to allocate memory %d",
  2061. sizeof(struct cam_generic_fence_monitor_data *) *
  2062. CAM_SYNC_MONITOR_TABLE_SIZE);
  2063. }
  2064. }
  2065. mutex_unlock(&sync_dev->table_lock);
  2066. return rc;
  2067. }
/*
 * cam_sync_close() - release handler for the sync video device.
 *
 * On the last close: every ACTIVE sync object is signaled with an error,
 * the work queue is flushed so in-flight signal callbacks finish, and only
 * then are all remaining sync objects destroyed and the monitor table
 * freed. The dma-fence (and synx, when enabled) tables are cleaned on
 * every close, and the event queue pointer is cleared after the table lock
 * is dropped.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0, i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_COMMON_RELEASE_EVENT);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d", i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n", i);
			}
		}

		/* Free per-slot monitor entries, then the table itself
		 * (kfree(NULL) is a no-op if monitoring was never enabled).
		 */
		if (sync_dev->mon_data) {
			for (i = 0; i < CAM_SYNC_MONITOR_TABLE_SIZE; i++) {
				kfree(sync_dev->mon_data[i]);
				sync_dev->mon_data[i] = NULL;
			}
		}
		kfree(sync_dev->mon_data);
		sync_dev->mon_data = NULL;
	}

	/* Clean dma fence table */
	cam_dma_fence_close();
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	/* Clean synx obj table */
	cam_synx_obj_close();
#endif
	mutex_unlock(&sync_dev->table_lock);

	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
  2138. static void cam_sync_event_queue_notify_error(const struct v4l2_event *old,
  2139. struct v4l2_event *new)
  2140. {
  2141. if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) {
  2142. struct cam_sync_ev_header_v2 *ev_header;
  2143. ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old));
  2144. CAM_ERR(CAM_CRM,
  2145. "Failed to notify event id %d fence %d statue %d reason %u %u %u %u",
  2146. old->id, ev_header->sync_obj, ev_header->status,
  2147. ev_header->evt_param[0], ev_header->evt_param[1],
  2148. ev_header->evt_param[2], ev_header->evt_param[3]);
  2149. } else {
  2150. struct cam_sync_ev_header *ev_header;
  2151. ev_header = CAM_SYNC_GET_HEADER_PTR((*old));
  2152. CAM_ERR(CAM_CRM,
  2153. "Failed to notify event id %d fence %d statue %d",
  2154. old->id, ev_header->sync_obj, ev_header->status);
  2155. }
  2156. }
/* Event-queue ops: log (via the merge hook) events dropped on overflow */
static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = {
	.merge = cam_sync_event_queue_notify_error,
};
  2160. int cam_sync_subscribe_event(struct v4l2_fh *fh,
  2161. const struct v4l2_event_subscription *sub)
  2162. {
  2163. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  2164. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  2165. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  2166. return -EINVAL;
  2167. }
  2168. sync_dev->version = sub->type;
  2169. CAM_DBG(CAM_SYNC, "Sync event verion type 0x%x", sync_dev->version);
  2170. return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS,
  2171. &cam_sync_v4l2_ops);
  2172. }
  2173. int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
  2174. const struct v4l2_event_subscription *sub)
  2175. {
  2176. if (!((sub->type == CAM_SYNC_V4L_EVENT) ||
  2177. (sub->type == CAM_SYNC_V4L_EVENT_V2))) {
  2178. CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type);
  2179. return -EINVAL;
  2180. }
  2181. return v4l2_event_unsubscribe(fh, sub);
  2182. }
/*
 * ioctl dispatch table: event (un)subscription handlers above, plus the
 * driver-private sync ioctls routed through vidioc_default.
 */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
/*
 * File operations for the sync video node. All ioctls funnel through
 * video_ioctl2 into g_cam_sync_ioctl_ops; the same entry serves 32-bit
 * userspace when CONFIG_COMPAT is enabled.
 */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
  2198. #if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER)
  2199. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  2200. struct platform_device *pdev)
  2201. {
  2202. int rc;
  2203. sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
  2204. GFP_KERNEL);
  2205. if (!sync_dev->v4l2_dev.mdev)
  2206. return -ENOMEM;
  2207. media_device_init(sync_dev->v4l2_dev.mdev);
  2208. strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
  2209. sizeof(sync_dev->v4l2_dev.mdev->model));
  2210. sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
  2211. rc = media_device_register(sync_dev->v4l2_dev.mdev);
  2212. if (rc < 0)
  2213. goto register_fail;
  2214. rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
  2215. if (rc < 0)
  2216. goto entity_fail;
  2217. return 0;
  2218. entity_fail:
  2219. media_device_unregister(sync_dev->v4l2_dev.mdev);
  2220. register_fail:
  2221. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  2222. return rc;
  2223. }
  2224. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  2225. {
  2226. media_entity_cleanup(&sync_dev->vdev->entity);
  2227. media_device_unregister(sync_dev->v4l2_dev.mdev);
  2228. media_device_cleanup(sync_dev->v4l2_dev.mdev);
  2229. kfree(sync_dev->v4l2_dev.mdev);
  2230. }
  2231. static void cam_sync_init_entity(struct sync_device *sync_dev)
  2232. {
  2233. sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
  2234. sync_dev->vdev->entity.name =
  2235. video_device_node_name(sync_dev->vdev);
  2236. }
  2237. #else
  2238. static int cam_sync_media_controller_init(struct sync_device *sync_dev,
  2239. struct platform_device *pdev)
  2240. {
  2241. return 0;
  2242. }
  2243. static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
  2244. {
  2245. }
  2246. static void cam_sync_init_entity(struct sync_device *sync_dev)
  2247. {
  2248. }
  2249. #endif
  2250. static int cam_sync_create_debugfs(void)
  2251. {
  2252. int rc;
  2253. struct dentry *dbgfileptr = NULL;
  2254. if (!cam_debugfs_available())
  2255. return 0;
  2256. rc = cam_debugfs_create_subdir("sync", &dbgfileptr);
  2257. if (rc) {
  2258. CAM_ERR(CAM_SYNC,"DebugFS could not create directory!");
  2259. rc = -ENOENT;
  2260. goto end;
  2261. }
  2262. /* Store parent inode for cleanup in caller */
  2263. sync_dev->dentry = dbgfileptr;
  2264. debugfs_create_bool("trigger_cb_without_switch", 0644,
  2265. sync_dev->dentry, &trigger_cb_without_switch);
  2266. debugfs_create_ulong("cam_sync_monitor_mask", 0644,
  2267. sync_dev->dentry, &cam_sync_monitor_mask);
  2268. end:
  2269. return rc;
  2270. }
  2271. #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
  2272. int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
  2273. {
  2274. int rc;
  2275. uint32_t sync_status = synx_status;
  2276. switch (synx_status) {
  2277. case SYNX_STATE_ACTIVE:
  2278. sync_status = CAM_SYNC_STATE_ACTIVE;
  2279. break;
  2280. case SYNX_STATE_SIGNALED_SUCCESS:
  2281. sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
  2282. break;
  2283. case SYNX_STATE_SIGNALED_ERROR:
  2284. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  2285. break;
  2286. case 4: /* SYNX_STATE_SIGNALED_CANCEL: */
  2287. sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL;
  2288. break;
  2289. default:
  2290. CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d",
  2291. synx_status, sync_obj);
  2292. sync_status = CAM_SYNC_STATE_SIGNALED_ERROR;
  2293. break;
  2294. }
  2295. rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX);
  2296. if (rc) {
  2297. CAM_ERR(CAM_SYNC,
  2298. "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d",
  2299. sync_obj, synx_status, sync_status, rc);
  2300. }
  2301. return rc;
  2302. }
  2303. int cam_synx_sync_register_callback(sync_callback cb_func,
  2304. void *userdata, int32_t sync_obj)
  2305. {
  2306. return cam_sync_register_callback(cb_func, userdata, sync_obj);
  2307. }
  2308. int cam_synx_sync_deregister_callback(sync_callback cb_func,
  2309. void *userdata, int32_t sync_obj)
  2310. {
  2311. return cam_sync_deregister_callback(cb_func, userdata, sync_obj);
  2312. }
  2313. static int cam_sync_register_synx_bind_ops(
  2314. struct synx_register_params *object)
  2315. {
  2316. int rc;
  2317. rc = synx_register_ops(object);
  2318. if (rc)
  2319. CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc);
  2320. return rc;
  2321. }
  2322. static void cam_sync_unregister_synx_bind_ops(
  2323. struct synx_register_params *object)
  2324. {
  2325. int rc;
  2326. rc = synx_deregister_ops(object);
  2327. if (rc)
  2328. CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc);
  2329. }
  2330. static void cam_sync_configure_synx_obj(struct synx_register_params *object)
  2331. {
  2332. struct synx_register_params *params = object;
  2333. params->name = CAM_SYNC_NAME;
  2334. params->type = SYNX_TYPE_CSL;
  2335. params->ops.register_callback = cam_synx_sync_register_callback;
  2336. params->ops.deregister_callback = cam_synx_sync_deregister_callback;
  2337. params->ops.enable_signaling = cam_sync_get_obj_ref;
  2338. params->ops.signal = cam_synx_sync_signal;
  2339. }
  2340. #endif
/*
 * cam_sync_component_bind - allocate and bring up the sync device.
 *
 * Allocates the global sync_dev and its object table, registers the v4l2
 * video device, creates the high-priority signalling workqueue, and
 * initializes the dma-fence backend plus (config-dependent) the synx
 * backend. Failures unwind through the goto ladder in reverse order of
 * initialization. Returns 0 on success, negative errno on failure.
 */
static int cam_sync_component_bind(struct device *dev,
	struct device *master_dev, void *data)
{
	int rc, idx;
	struct platform_device *pdev = to_platform_device(dev);

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	sync_dev->sync_table = vzalloc(sizeof(struct sync_table_row) * CAM_SYNC_MAX_OBJS);
	if (!sync_dev->sync_table) {
		CAM_ERR(CAM_SYNC, "Mem Allocation failed for sync table");
		kfree(sync_dev);
		return -ENOMEM;
	}

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	/* One spinlock per sync-object row in the table */
	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release_empty;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1; /* let the core pick the minor */
	sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO;
	rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, sync_dev->vdev->name, sync_dev->vdev->device_caps);
		goto v4l2_fail;
	}

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	/* Single-threaded, high-priority queue for signalling callbacks */
	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	/* Initialize dma fence driver */
	rc = cam_dma_fence_driver_init();
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"DMA fence driver initialization failed rc: %d", rc);
		goto workq_destroy;
	}

	trigger_cb_without_switch = false;
	cam_sync_monitor_mask = 0;
	/* Best-effort: debugfs failure is not fatal, return value ignored */
	cam_sync_create_debugfs();
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	/* Initialize synx obj driver */
	rc = cam_synx_obj_driver_init();
	if (rc) {
		CAM_ERR(CAM_SYNC,
			"Synx obj driver initialization failed rc: %d", rc);
		goto dma_driver_deinit;
	}
#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	CAM_DBG(CAM_SYNC, "Registering with synx driver");
	cam_sync_configure_synx_obj(&sync_dev->params);
	rc = cam_sync_register_synx_bind_ops(&sync_dev->params);
	if (rc)
		goto dma_driver_deinit;
#endif
	CAM_DBG(CAM_SYNC, "Component bound successfully");
	return rc;

	/* Error unwind: reverse order of the init steps above */
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
dma_driver_deinit:
	cam_dma_fence_driver_deinit();
#endif
workq_destroy:
	destroy_workqueue(sync_dev->work_queue);
v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	/* video_unregister_device is a no-op if the vdev never registered */
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	vfree(sync_dev->sync_table);
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
/*
 * cam_sync_component_unbind - tear down everything bind set up, roughly in
 * reverse order: v4l2 device, media controller, synx backend, video device,
 * dma-fence driver, and finally the sync table and sync_dev itself.
 */
static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	cam_synx_obj_driver_deinit();
#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	/* debugfs entries are removed by the shared camera debugfs owner */
	sync_dev->dentry = NULL;
	cam_dma_fence_driver_deinit();

	/*
	 * NOTE(review): re-initializing the row spinlocks immediately before
	 * freeing sync_dev looks redundant — confirm whether any late
	 * callback can still take these locks during teardown.
	 */
	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	vfree(sync_dev->sync_table);
	kfree(sync_dev);
	sync_dev = NULL;
}
  2469. const static struct component_ops cam_sync_component_ops = {
  2470. .bind = cam_sync_component_bind,
  2471. .unbind = cam_sync_component_unbind,
  2472. };
  2473. static int cam_sync_probe(struct platform_device *pdev)
  2474. {
  2475. int rc;
  2476. CAM_DBG(CAM_SYNC, "Adding Sync component");
  2477. rc = component_add(&pdev->dev, &cam_sync_component_ops);
  2478. if (rc)
  2479. CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);
  2480. return rc;
  2481. }
  2482. static int cam_sync_remove(struct platform_device *pdev)
  2483. {
  2484. component_del(&pdev->dev, &cam_sync_component_ops);
  2485. return 0;
  2486. }
/* Device-tree match table: binds this driver to "qcom,cam-sync" nodes. */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);
/*
 * Platform driver definition; real initialization happens in the
 * component bind callback, so probe/remove only (de)register with the
 * component framework.
 */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		/* disallow manual bind/unbind through sysfs */
		.suppress_bind_attrs = true,
	},
};
  2502. int cam_sync_init(void)
  2503. {
  2504. return platform_driver_register(&cam_sync_driver);
  2505. }
/* cam_sync_exit - module exit: unregister the cam_sync platform driver. */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}
  2510. MODULE_DESCRIPTION("Camera sync driver");
  2511. MODULE_LICENSE("GPL v2");