// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"
#include "cam_ife_hw_mgr.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
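/*
 * Advance a lock-free ring-buffer head and return the slot to write.
 * The head is a monotonically increasing atomic64 starting at -1, so the
 * first atomic64_add_return() yields 0; the remainder modulo @max_entries
 * is the usable array index, stored through @ret.
 */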
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
		max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	void *pf_args);
static int cam_isp_context_hw_recovery(void *priv, void *data);
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp);
static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id);
static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type);
static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state);
static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
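/*
 * Record an event in the per-event-type ring buffer for later dumping.
 * SUBMIT and APPLY events are always tied to a request, so @req must be
 * valid for them; for EPOCH/RUP/BUFDONE a NULL @req is recorded with
 * req_id 0.
 */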
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}

	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}
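/*
 * Dump-helper callback: serializes one event record as three 64-bit words
 * (req_id, seconds, microseconds) and returns the advanced write pointer.
 */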
static void *cam_isp_ctx_user_dump_events(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_isp_context_event_record *record;
	struct timespec64 ts;

	record = (struct cam_isp_context_event_record *)dump_struct;
	addr = (uint64_t *)addr_ptr;
	ts = ktime_to_timespec64(record->timestamp);

	*addr++ = record->req_id;
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;

	return addr;
}
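/*
 * Walk every event-type ring buffer and emit its entries, oldest first.
 * A head of -1 means nothing was ever recorded; a head below the ring
 * size means the ring has not wrapped yet, so the oldest entry is index
 * 0. Remaining dump-buffer space is validated before each ring is
 * written.
 */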
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, j, rc = 0;
	int index;
	size_t remain_len;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t state_head;
	struct cam_isp_context_event_record *record;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}

		index = oldest_entry;
		if (dump_args->buf_len <= dump_args->offset) {
			CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
				dump_args->buf_len, dump_args->offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
				sizeof(uint64_t));
		remain_len = dump_args->buf_len - dump_args->offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}

		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];

			rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events,
				record, sizeof(uint64_t), "ISP_EVT_%s:",
				__cam_isp_evt_val_to_type(i));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"CAM_ISP_CONTEXT DUMP_EVENT_RECORD: Dump failed, rc: %d",
					rc);
				return rc;
			}

			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
	}

	return rc;
}
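/*
 * Serialize one request's bookkeeping (ack counts, bubble state, fence
 * maps and IO configs) into the mini-dump buffer. Every variable-length
 * section is bounds-checked against @end_addr before it is copied, and
 * *bytes_updated reports how much was actually written.
 */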
static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
	uint8_t *start_addr, uint8_t *end_addr,
	unsigned long *bytes_updated)
{
	struct cam_isp_ctx_req_mini_dump *req_md;
	struct cam_buf_io_cfg *io_cfg;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet = NULL;
	unsigned long bytes_required = 0;

	bytes_required = sizeof(*req_md);
	*bytes_updated = 0;
	if (start_addr + bytes_required > end_addr)
		return;

	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_md->num_acked = req_isp->num_acked;
	req_md->num_deferred_acks = req_isp->num_deferred_acks;
	req_md->bubble_report = req_isp->bubble_report;
	req_md->bubble_detected = req_isp->bubble_detected;
	req_md->reapply_type = req_isp->reapply_type;
	req_md->request_id = req->request_id;
	*bytes_updated += bytes_required;

	if (req_isp->num_fence_map_out) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_out;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_out = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
		req_md->num_fence_map_out = req_isp->num_fence_map_out;
		*bytes_updated += bytes_required;
	}

	if (req_isp->num_fence_map_in) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_in;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_in = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
		req_md->num_fence_map_in = req_isp->num_fence_map_in;
		*bytes_updated += bytes_required;
	}

	packet = req_isp->hw_update_data.packet;
	if (packet && packet->num_io_configs) {
		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
			packet->io_configs_offset / 4);
		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->io_cfg, io_cfg, bytes_required);
		*bytes_updated += bytes_required;
		req_md->num_io_cfg = packet->num_io_configs;
	}
}
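/*
 * Mini-dump callback registered with the context: snapshots the ISP
 * context state (timestamps, counters, flags, event records) and then
 * appends per-request mini-dumps for the active, wait and pending lists
 * until the supplied buffer runs out.
 */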
static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
{
	struct cam_isp_ctx_mini_dump_info *md;
	struct cam_isp_context *ctx_isp;
	struct cam_context *ctx;
	struct cam_ctx_request *req, *req_temp;
	struct cam_hw_mini_dump_args *dump_args;
	uint8_t *start_addr;
	uint8_t *end_addr;
	unsigned long total_bytes = 0;
	unsigned long bytes_updated = 0;
	uint32_t i;

	if (!priv || !args) {
		CAM_ERR(CAM_ISP, "invalid params");
		return 0;
	}
	dump_args = (struct cam_hw_mini_dump_args *)args;
	if (dump_args->len < sizeof(*md)) {
		CAM_ERR(CAM_ISP,
			"Insufficient size: received %lu, required %zu",
			dump_args->len, sizeof(*md));
		return 0;
	}
	ctx = (struct cam_context *)priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	start_addr = (uint8_t *)dump_args->start_addr;
	end_addr = start_addr + dump_args->len;
	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;

	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
	md->boot_timestamp = ctx_isp->boot_timestamp;
	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
	md->init_timestamp = ctx_isp->init_timestamp;
	md->frame_id = ctx_isp->frame_id;
	md->reported_req_id = ctx_isp->reported_req_id;
	md->last_applied_req_id = ctx_isp->last_applied_req_id;
	md->last_bufdone_err_apply_req_id =
		ctx_isp->last_bufdone_err_apply_req_id;
	md->frame_id_meta = ctx_isp->frame_id_meta;
	md->substate_activated = ctx_isp->substate_activated;
	md->ctx_id = ctx->ctx_id;
	md->subscribe_event = ctx_isp->subscribe_event;
	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
	md->isp_device_type = ctx_isp->isp_device_type;
	md->active_req_cnt = ctx_isp->active_req_cnt;
	md->trigger_id = ctx_isp->trigger_id;
	md->rdi_only_context = ctx_isp->rdi_only_context;
	md->offline_context = ctx_isp->offline_context;
	md->hw_acquired = ctx_isp->hw_acquired;
	md->init_received = ctx_isp->init_received;
	md->split_acquire = ctx_isp->split_acquire;
	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
	md->support_consumed_addr = ctx_isp->support_consumed_addr;
	md->use_default_apply = ctx_isp->use_default_apply;
	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		memcpy(md->event_record[i], ctx_isp->event_record[i],
			sizeof(struct cam_isp_context_event_record) *
			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
	}

	total_bytes += sizeof(*md);
	if (start_addr + total_bytes >= end_addr)
		goto end;

	if (!list_empty(&ctx->active_req_list)) {
		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->active_list[md->active_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if (start_addr + total_bytes >= end_addr)
				goto end;
		}
	}

	if (!list_empty(&ctx->wait_req_list)) {
		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->wait_list[md->wait_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if (start_addr + total_bytes >= end_addr)
				goto end;
		}
	}

	if (!list_empty(&ctx->pending_req_list)) {
		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->pending_list[md->pending_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if (start_addr + total_bytes >= end_addr)
				goto end;
		}
	}

end:
	dump_args->bytes_written = total_bytes;
	return 0;
}
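/*
 * Append the current substate transition to the state-monitor ring
 * buffer, tagged with the triggering event, frame, request id and a
 * timestamp relative to context init.
 */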
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}
static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
		return "SEC_EVT_SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
		return "SEC_EVT_EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
		return "OUT_OF_SYNC_FRAME_DROP";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
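/*
 * Log the state-monitor ring to the kernel log at error severity,
 * oldest entry first, typically on an error path to show the substate
 * transitions that preceded the failure.
 */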
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		/* Head is the index of the most recent entry */
		num_entries = state_head + 1;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}
static void *cam_isp_ctx_user_dump_state_monitor_array_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_isp_context_state_monitor *evt = NULL;
	uint64_t *addr;

	evt = (struct cam_isp_context_state_monitor *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = evt->evt_time_stamp;
	*addr++ = evt->frame_id;
	*addr++ = evt->req_id;

	return addr;
}
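/*
 * Same ring walk as the error-log dump above, but the entries are
 * emitted through the common user dump helper into a caller-provided
 * buffer instead of the kernel log.
 */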
static int __cam_isp_ctx_user_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	int index;
	uint32_t oldest_entry;
	uint32_t num_entries;
	uint64_t state_head;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return 0;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		/* Head is the index of the most recent entry */
		num_entries = state_head + 1;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		rc = cam_common_user_dump_helper(dump_args,
			cam_isp_ctx_user_dump_state_monitor_array_info,
			&ctx_isp->cam_isp_ctx_state_monitor[index],
			sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:",
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
		if (rc) {
			CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: State monitor dump failed, rc: %d", rc);
			return rc;
		}

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}

	return rc;
}
static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}

static const char *__cam_isp_ctx_crm_trigger_point_to_string(
	int trigger_point)
{
	switch (trigger_point) {
	case CAM_TRIGGER_POINT_SOF:
		return "SOF";
	case CAM_TRIGGER_POINT_EOF:
		return "EOF";
	default:
		return "Invalid";
	}
}
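/*
 * Notify the CAM request manager (CRM) of a trigger point (SOF/EOF).
 * The notify is skipped when the trigger type is not in the context's
 * subscribe mask or while internal recovery is in progress.
 */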
static int __cam_isp_ctx_notify_trigger_util(
	int trigger_type, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_trigger_notify notify;

	/* Trigger type not subscribed, return */
	if (!(ctx_isp->subscribe_event & trigger_type)) {
		CAM_DBG(CAM_ISP,
			"%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id);
		return 0;
	}

	/* Skip CRM notify when recovery is in progress */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_DBG(CAM_ISP,
			"Internal recovery in progress skip notifying %s trigger point in ctx: %u on link: 0x%x",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx->ctx_id, ctx->link_hdl);
		return 0;
	}

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.frame_id = ctx_isp->frame_id;
	notify.trigger = trigger_type;
	notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
	notify.trigger_id = ctx_isp->trigger_id;

	CAM_DBG(CAM_ISP,
		"Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld",
		__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
		ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
		ctx_isp->req_info.last_bufdone_req_id);

	rc = ctx->ctx_crm_intf->notify_trigger(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id, rc);

	return rc;
}
static int __cam_isp_ctx_notify_v4l2_error_event(
	uint32_t error_type, uint32_t error_code,
	uint64_t error_request_id, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx->session_hdl;
	req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
	req_msg.u.err_msg.error_type = error_type;
	req_msg.u.err_msg.link_hdl = ctx->link_hdl;
	req_msg.u.err_msg.request_id = error_request_id;
	req_msg.u.err_msg.resource_size = 0x0;
	req_msg.u.err_msg.error_code = error_code;

	rc = cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_ERROR,
		V4L_EVENT_CAM_REQ_MGR_EVENT);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x",
			error_type, error_code, error_request_id,
			ctx->ctx_id, ctx->link_hdl);
	else
		CAM_DBG(CAM_ISP,
			"v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x notified successfully",
			error_type, error_code, error_request_id,
			ctx->ctx_id, ctx->link_hdl);

	return rc;
}
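/*
 * Report a request error to CRM through the notify_err interface.
 * Bubbles (recoverable frame drops) are logged as warnings; everything
 * else is treated as a fatal error.
 */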
static int __cam_isp_ctx_notify_error_util(
	uint32_t trigger_type, enum cam_req_mgr_device_error error,
	uint64_t req_id, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_error_notify notify;

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.req_id = req_id;
	notify.error = error;
	notify.trigger = trigger_type;
	notify.frame_id = ctx_isp->frame_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;

	if (error == CRM_KMD_ERR_BUBBLE)
		CAM_WARN(CAM_ISP,
			"Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x",
			req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
	else
		CAM_ERR(CAM_ISP,
			"Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x",
			error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);

	rc = ctx->ctx_crm_intf->notify_err(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify error: %u for req: %llu on ctx: %u in link: 0x%x",
			error, req_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
static int __cam_isp_ctx_trigger_reg_dump(
	enum cam_hw_mgr_command cmd,
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = cmd;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d",
			ctx->ctx_id, ctx->link_hdl, rc);
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"Reg dump type: %u successful in ctx: %u on link: 0x%x",
		cmd, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
static int __cam_isp_ctx_pause_crm_timer(
	struct cam_context *ctx)
{
	int rc = -EINVAL;
	struct cam_req_mgr_timer_notify timer;

	if (!ctx || !ctx->ctx_crm_intf)
		goto end;

	timer.link_hdl = ctx->link_hdl;
	timer.dev_hdl = ctx->dev_hdl;
	timer.state = false;
	rc = ctx->ctx_crm_intf->notify_timer(&timer);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success",
		ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
static inline void __cam_isp_ctx_update_sof_ts_util(
    struct cam_isp_hw_sof_event_data *sof_event_data,
    struct cam_isp_context *ctx_isp)
{
    /* Delayed update, skip if ts is already updated */
    if (ctx_isp->sof_timestamp_val == sof_event_data->timestamp)
        return;

    ctx_isp->frame_id++;
    ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
    ctx_isp->boot_timestamp = sof_event_data->boot_time;
}
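/*
 * cam_isp_ctx_dump_req()
 *
 * Dumps every command buffer configured for the given request.
 * When dump_to_buff is true, the CDM command buffers are copied into
 * the caller-provided buffer at *offset (which is advanced);
 * otherwise they are dumped to the log via the CDM util helper.
 */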
static int cam_isp_ctx_dump_req(
    struct cam_isp_ctx_req *req_isp,
    uintptr_t cpu_addr,
    size_t buf_len,
    size_t *offset,
    bool dump_to_buff)
{
    int i, rc = 0;
    size_t len = 0;
    uint32_t *buf_addr;
    uint32_t *buf_start, *buf_end;
    size_t remain_len = 0;
    struct cam_cdm_cmd_buf_dump_info dump_info;

    for (i = 0; i < req_isp->num_cfg; i++) {
        rc = cam_packet_util_get_cmd_mem_addr(
            req_isp->cfg[i].handle, &buf_addr, &len);
        if (rc) {
            CAM_ERR_RATE_LIMIT(CAM_ISP,
                "Failed to get_cmd_mem_addr, rc=%d",
                rc);
        } else {
            if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
                CAM_ERR(CAM_ISP,
                    "Invalid offset %u, buffer len %u",
                    req_isp->cfg[i].offset, (uint32_t)len);
                return -EINVAL;
            }
            remain_len = len - req_isp->cfg[i].offset;
            if (req_isp->cfg[i].len > ((uint32_t)remain_len)) {
                CAM_ERR(CAM_ISP,
                    "Invalid len %u, remain_len %u",
                    req_isp->cfg[i].len,
                    (uint32_t)remain_len);
                return -EINVAL;
            }
            buf_start = (uint32_t *)((uint8_t *) buf_addr +
                req_isp->cfg[i].offset);
            buf_end = (uint32_t *)((uint8_t *) buf_start +
                req_isp->cfg[i].len - 1);
            if (dump_to_buff) {
                if (!cpu_addr || !offset || !buf_len) {
                    CAM_ERR(CAM_ISP, "Invalid args");
                    break;
                }
                dump_info.src_start = buf_start;
                dump_info.src_end = buf_end;
                dump_info.dst_start = cpu_addr;
                dump_info.dst_offset = *offset;
                dump_info.dst_max_size = buf_len;
                rc = cam_cdm_util_dump_cmd_bufs_v2(&dump_info);
                *offset = dump_info.dst_offset;
                if (rc)
                    return rc;
            } else {
                cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
            }
        }
    }
    return rc;
}
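/*
 * __cam_isp_ctx_enqueue_request_in_order()
 *
 * Inserts a new request into the pending list sorted by request_id.
 * Entries with larger ids are temporarily moved to temp_list, the new
 * request is appended, and the moved entries are re-appended behind
 * it, keeping the list in ascending request-id order.
 */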
static int __cam_isp_ctx_enqueue_request_in_order(
    struct cam_context *ctx, struct cam_ctx_request *req, bool lock)
{
    struct cam_ctx_request *req_current;
    struct cam_ctx_request *req_prev;
    struct list_head temp_list;
    struct cam_isp_context *ctx_isp;

    INIT_LIST_HEAD(&temp_list);
    if (lock)
        spin_lock_bh(&ctx->lock);
    if (list_empty(&ctx->pending_req_list)) {
        list_add_tail(&req->list, &ctx->pending_req_list);
    } else {
        list_for_each_entry_safe_reverse(
            req_current, req_prev, &ctx->pending_req_list, list) {
            if (req->request_id < req_current->request_id) {
                list_del_init(&req_current->list);
                list_add(&req_current->list, &temp_list);
                continue;
            } else if (req->request_id == req_current->request_id) {
                CAM_WARN(CAM_ISP,
                    "Received duplicate request %lld",
                    req->request_id);
            }
            break;
        }
        list_add_tail(&req->list, &ctx->pending_req_list);

        if (!list_empty(&temp_list)) {
            list_for_each_entry_safe(
                req_current, req_prev, &temp_list, list) {
                list_del_init(&req_current->list);
                list_add_tail(&req_current->list,
                    &ctx->pending_req_list);
            }
        }
    }
    ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
    __cam_isp_ctx_update_event_record(ctx_isp,
        CAM_ISP_CTX_EVENT_SUBMIT, req);
    if (lock)
        spin_unlock_bh(&ctx->lock);
    return 0;
}
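/*
 * __cam_isp_ctx_enqueue_init_request()
 *
 * Merges a second INIT packet into the INIT request already at the
 * head of the pending list (fence maps, HW config entries, reg dump
 * buffers and ePCR params), then recycles the new request onto the
 * free list. Fails if the merge would overflow max_hw_update_entries
 * or if an update packet arrives before any INIT packet.
 */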
static int __cam_isp_ctx_enqueue_init_request(
    struct cam_context *ctx, struct cam_ctx_request *req)
{
    int rc = 0;
    struct cam_ctx_request *req_old;
    struct cam_isp_ctx_req *req_isp_old;
    struct cam_isp_ctx_req *req_isp_new;
    struct cam_isp_prepare_hw_update_data *req_update_old;
    struct cam_isp_prepare_hw_update_data *req_update_new;
    struct cam_isp_prepare_hw_update_data *hw_update_data;

    spin_lock_bh(&ctx->lock);
    if (list_empty(&ctx->pending_req_list)) {
        list_add_tail(&req->list, &ctx->pending_req_list);
        CAM_DBG(CAM_ISP, "INIT packet added req id= %lld",
            req->request_id);
        goto end;
    }

    req_old = list_first_entry(&ctx->pending_req_list,
        struct cam_ctx_request, list);
    req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
    req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
    if (req_isp_old->hw_update_data.packet_opcode_type ==
        CAM_ISP_PACKET_INIT_DEV) {
        if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
            ctx->max_hw_update_entries) {
            CAM_WARN(CAM_ISP,
                "Can not merge INIT pkt num_cfgs = %d",
                (req_isp_old->num_cfg +
                    req_isp_new->num_cfg));
            rc = -ENOMEM;
        }

        if (req_isp_old->num_fence_map_out != 0 ||
            req_isp_old->num_fence_map_in != 0) {
            CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
            rc = -EINVAL;
        }

        if (!rc) {
            memcpy(req_isp_old->fence_map_out,
                req_isp_new->fence_map_out,
                sizeof(req_isp_new->fence_map_out[0]) *
                req_isp_new->num_fence_map_out);
            req_isp_old->num_fence_map_out =
                req_isp_new->num_fence_map_out;

            memcpy(req_isp_old->fence_map_in,
                req_isp_new->fence_map_in,
                sizeof(req_isp_new->fence_map_in[0]) *
                req_isp_new->num_fence_map_in);
            req_isp_old->num_fence_map_in =
                req_isp_new->num_fence_map_in;

            memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
                req_isp_new->cfg,
                sizeof(req_isp_new->cfg[0]) *
                req_isp_new->num_cfg);
            req_isp_old->num_cfg += req_isp_new->num_cfg;

            memcpy(&req_old->pf_data, &req->pf_data,
                sizeof(struct cam_hw_mgr_pf_request_info));

            if (req_isp_new->hw_update_data.num_reg_dump_buf) {
                req_update_new = &req_isp_new->hw_update_data;
                req_update_old = &req_isp_old->hw_update_data;
                memcpy(&req_update_old->reg_dump_buf_desc,
                    &req_update_new->reg_dump_buf_desc,
                    sizeof(struct cam_cmd_buf_desc) *
                    req_update_new->num_reg_dump_buf);
                req_update_old->num_reg_dump_buf =
                    req_update_new->num_reg_dump_buf;
            }

            /* Update HW update params for ePCR */
            hw_update_data = &req_isp_new->hw_update_data;
            req_isp_old->hw_update_data.frame_header_res_id =
                req_isp_new->hw_update_data.frame_header_res_id;
            req_isp_old->hw_update_data.frame_header_cpu_addr =
                hw_update_data->frame_header_cpu_addr;
            if (req_isp_new->hw_update_data.mup_en) {
                req_isp_old->hw_update_data.mup_en =
                    req_isp_new->hw_update_data.mup_en;
                req_isp_old->hw_update_data.mup_val =
                    req_isp_new->hw_update_data.mup_val;
                req_isp_old->hw_update_data.num_exp =
                    req_isp_new->hw_update_data.num_exp;
            }
            req_old->request_id = req->request_id;
            list_add_tail(&req->list, &ctx->free_req_list);
        }
    } else {
        CAM_WARN(CAM_ISP,
            "Received Update pkt before INIT pkt. req_id= %lld",
            req->request_id);
        rc = -EINVAL;
    }
end:
    spin_unlock_bh(&ctx->lock);
    return rc;
}
static const char *__cam_isp_ife_sfe_resource_handle_id_to_type(
    uint32_t resource_handle)
{
    switch (resource_handle) {
    /* IFE output ports */
    case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL";
    case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4";
    case CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16";
    case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP";
    case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD";
    case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF";
    case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0";
    case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1";
    case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2";
    case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3";
    case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE";
    case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST";
    case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG";
    case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF";
    case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG";
    case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST";
    case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS";
    case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS";
    case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST";
    case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP";
    case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP";
    case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP";
    case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD";
    case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR";
    case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW";
    case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD";
    case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE";
    case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS";
    case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST";
    case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG";
    case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW";
    case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD";
    case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF";
    case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS";
    case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA";
    case CAM_ISP_IFE_OUT_RES_STATS_ALSC: return "IFE_STATS_ALSC";
    /* SFE output ports */
    case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0";
    case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1";
    case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2";
    case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3";
    case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4";
    case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0";
    case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1";
    case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2";
    case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0";
    case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1";
    case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2";
    case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR";
    case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW";
    case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR";
    case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0";
    case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1";
    case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2";
    case CAM_ISP_SFE_OUT_HDR_STATS: return "HDR_STATS";
    /* SFE input ports */
    case CAM_ISP_SFE_IN_RD_0: return "SFE_RD_0";
    case CAM_ISP_SFE_IN_RD_1: return "SFE_RD_1";
    case CAM_ISP_SFE_IN_RD_2: return "SFE_RD_2";
    /* Handle invalid type */
    default: return "Invalid_Resource_Type";
    }
}
static const char *__cam_isp_tfe_resource_handle_id_to_type(
    uint32_t resource_handle)
{
    switch (resource_handle) {
    /* TFE output ports */
    case CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL";
    case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP";
    case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF";
    case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0";
    case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1";
    case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2";
    case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE";
    case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST";
    case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG";
    case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF";
    case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG";
    case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS";
    case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4";
    case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16";
    case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI";
    /* Handle invalid type */
    default: return "Invalid_Resource_Type";
    }
}
static const char *__cam_isp_resource_handle_id_to_type(
    uint32_t device_type, uint32_t resource_handle)
{
    switch (device_type) {
    case CAM_IFE_DEVICE_TYPE:
        return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle);
    case CAM_TFE_DEVICE_TYPE:
        return __cam_isp_tfe_resource_handle_id_to_type(resource_handle);
    default:
        return "INVALID_DEV_TYPE";
    }
}
static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
{
    uint64_t ts = 0;

    if (!evt_data)
        return 0;

    switch (evt_id) {
    case CAM_ISP_HW_EVENT_ERROR:
        ts = ((struct cam_isp_hw_error_event_data *)evt_data)->timestamp;
        break;
    case CAM_ISP_HW_EVENT_SOF:
        ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->timestamp;
        break;
    case CAM_ISP_HW_EVENT_REG_UPDATE:
        ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->timestamp;
        break;
    case CAM_ISP_HW_EVENT_EPOCH:
        ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->timestamp;
        break;
    case CAM_ISP_HW_EVENT_EOF:
        ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->timestamp;
        break;
    case CAM_ISP_HW_EVENT_DONE:
    case CAM_ISP_HW_SECONDARY_EVENT:
        break;
    default:
        CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
    }

    return ts;
}
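/*
 * __cam_isp_ctx_get_hw_timestamp()
 *
 * Queries the HW manager (CAM_ISP_HW_MGR_GET_SOF_TS) for the two most
 * recent SOF timestamps and the corresponding boot timestamp. Returns
 * -EINVAL if the reported previous timestamp is not strictly older
 * than the current one.
 */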
static int __cam_isp_ctx_get_hw_timestamp(struct cam_context *ctx, uint64_t *prev_ts,
    uint64_t *curr_ts, uint64_t *boot_ts)
{
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    int rc;

    hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    hw_cmd_args.u.internal_args = &isp_hw_cmd_args;

    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_SOF_TS;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->ctxt_to_hw_map, &hw_cmd_args);
    if (rc)
        return rc;

    if (isp_hw_cmd_args.u.sof_ts.prev >= isp_hw_cmd_args.u.sof_ts.curr) {
        CAM_ERR(CAM_ISP,
            "ctx:%u previous timestamp is greater than or equal to current timestamp",
            ctx->ctx_id);
        return -EINVAL;
    }

    *prev_ts = isp_hw_cmd_args.u.sof_ts.prev;
    *curr_ts = isp_hw_cmd_args.u.sof_ts.curr;
    *boot_ts = isp_hw_cmd_args.u.sof_ts.boot;

    return 0;
}
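/*
 * __cam_isp_ctx_recover_sof_timestamp()
 *
 * Called when a SOF IRQ was missed: reads the HW timestamps and
 * decides whether the HW is one frame (B) or two frames (C) ahead of
 * the last SOF the context saw (A), then advances frame_id and the
 * SOF/boot timestamps accordingly.
 */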
static int __cam_isp_ctx_recover_sof_timestamp(struct cam_context *ctx, uint64_t request_id)
{
    struct cam_isp_context *ctx_isp = ctx->ctx_priv;
    uint64_t prev_ts, curr_ts, boot_ts;
    uint64_t a, b, c;
    int rc;

    rc = __cam_isp_ctx_get_hw_timestamp(ctx, &prev_ts, &curr_ts, &boot_ts);
    if (rc) {
        CAM_ERR(CAM_ISP, "ctx:%u Failed to get timestamp from HW", ctx->ctx_id);
        return rc;
    }

    /*
     * If the last received SOF was for frame A and we have missed the SOF for
     * frame B, then we need to find out if the hardware is at frame B or C.
     *   +-----+-----+-----+
     *   |  A  |  B  |  C  |
     *   +-----+-----+-----+
     */
    a = ctx_isp->sof_timestamp_val;
    if (a == prev_ts) {
        /* Hardware is at frame B */
        b = curr_ts;
        CAM_DBG(CAM_ISP, "ctx:%u recovered timestamp (last:0x%llx, curr:0x%llx) req: %llu",
            ctx->ctx_id, a, b, request_id);
    } else if (a < prev_ts) {
        /* Hardware is at frame C */
        b = prev_ts;
        c = curr_ts;
        CAM_DBG(CAM_ISP,
            "ctx:%u recovered timestamp (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
            ctx->ctx_id, a, b, c, request_id);
    } else {
        /* Hardware is at frame A (which we supposedly missed) */
        CAM_ERR_RATE_LIMIT(CAM_ISP,
            "ctx:%u erroneous call to SOF recovery (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
            ctx->ctx_id, a, prev_ts, curr_ts, request_id);
        return 0;
    }

    ctx_isp->boot_timestamp = boot_ts + (b - curr_ts);
    ctx_isp->sof_timestamp_val = b;
    ctx_isp->frame_id++;

    return 0;
}
static void __cam_isp_ctx_send_sof_boot_timestamp(
    struct cam_isp_context *ctx_isp, uint64_t request_id,
    uint32_t sof_event_status)
{
    struct cam_req_mgr_message req_msg;

    req_msg.session_hdl = ctx_isp->base->session_hdl;
    req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
    req_msg.u.frame_msg.request_id = request_id;
    req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
    req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
    req_msg.u.frame_msg.sof_status = sof_event_status;
    req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

    CAM_DBG(CAM_ISP,
        "request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
        request_id, ctx_isp->frame_id,
        ctx_isp->boot_timestamp, sof_event_status);

    if (cam_req_mgr_notify_message(&req_msg,
        V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
        V4L_EVENT_CAM_REQ_MGR_EVENT))
        CAM_ERR(CAM_ISP,
            "Error in notifying the boot time for req id:%lld",
            request_id);
}
static void __cam_isp_ctx_send_unified_timestamp(
    struct cam_isp_context *ctx_isp, uint64_t request_id)
{
    struct cam_req_mgr_message req_msg;

    req_msg.session_hdl = ctx_isp->base->session_hdl;
    req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id;
    req_msg.u.frame_msg_v2.request_id = request_id;
    req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] =
        (request_id == 0) ? 0 : ctx_isp->sof_timestamp_val;
    req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] = ctx_isp->boot_timestamp;
    req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl;
    req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta;

    CAM_DBG(CAM_ISP,
        "link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d boot time stamp:0x%llx",
        ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id,
        ctx_isp->sof_timestamp_val, ctx_isp->base->ctx_id,
        ctx_isp->boot_timestamp);

    if (cam_req_mgr_notify_message(&req_msg,
        V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT))
        CAM_ERR(CAM_ISP,
            "Error in notifying the sof and boot time for req id:%lld",
            request_id);
}
static void __cam_isp_ctx_send_sof_timestamp_frame_header(
    struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
    uint64_t request_id, uint32_t sof_event_status)
{
    uint32_t *time32 = NULL;
    uint64_t timestamp = 0;
    struct cam_req_mgr_message req_msg;

    time32 = frame_header_cpu_addr;
    timestamp = (uint64_t) time32[1];
    timestamp = timestamp << 24;
    timestamp |= (uint64_t)(time32[0] >> 8);
    timestamp = mul_u64_u32_div(timestamp,
        CAM_IFE_QTIMER_MUL_FACTOR,
        CAM_IFE_QTIMER_DIV_FACTOR);
    ctx_isp->sof_timestamp_val = timestamp;

    req_msg.session_hdl = ctx_isp->base->session_hdl;
    req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
    req_msg.u.frame_msg.request_id = request_id;
    req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
    req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
    req_msg.u.frame_msg.sof_status = sof_event_status;

    CAM_DBG(CAM_ISP,
        "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
        request_id, ctx_isp->frame_id,
        ctx_isp->sof_timestamp_val, sof_event_status);

    if (cam_req_mgr_notify_message(&req_msg,
        V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
        CAM_ERR(CAM_ISP,
            "Error in notifying the sof time for req id:%lld",
            request_id);
}
static void __cam_isp_ctx_send_sof_timestamp(
    struct cam_isp_context *ctx_isp, uint64_t request_id,
    uint32_t sof_event_status)
{
    struct cam_req_mgr_message req_msg;
    struct cam_context *ctx = ctx_isp->base;

    if (ctx_isp->reported_frame_id == ctx_isp->frame_id) {
        if (__cam_isp_ctx_recover_sof_timestamp(ctx, request_id))
            CAM_WARN(CAM_ISP, "Missed SOF. Unable to recover SOF timestamp.");
    }

    if (request_id == 0 && (ctx_isp->reported_frame_id == ctx_isp->frame_id)) {
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "Missed SOF Recovery for invalid req, Skip notification to userspace Ctx: %u frame_id %llu",
            ctx->ctx_id, ctx_isp->frame_id);
        return;
    }

    ctx_isp->reported_frame_id = ctx_isp->frame_id;

    if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS))
        && !ctx_isp->use_frame_header_ts) {
        __cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id);
        return;
    }

    if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
        goto end;

    req_msg.session_hdl = ctx->session_hdl;
    req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
    req_msg.u.frame_msg.request_id = request_id;
    req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
    req_msg.u.frame_msg.link_hdl = ctx->link_hdl;
    req_msg.u.frame_msg.sof_status = sof_event_status;
    req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

    CAM_DBG(CAM_ISP,
        "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
        request_id, ctx_isp->frame_id,
        ctx_isp->sof_timestamp_val, sof_event_status);

    if (cam_req_mgr_notify_message(&req_msg,
        V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
        CAM_ERR(CAM_ISP,
            "Error in notifying the sof time for req id:%lld",
            request_id);

end:
    __cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
        request_id, sof_event_status);
}
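/*
 * __cam_isp_ctx_handle_buf_done_fail_log()
 *
 * Rate-limited diagnostics for a request whose buf_done did not fully
 * materialize: logs the ack/bubble state and every output resource
 * whose fence is still unsignalled.
 */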
static void __cam_isp_ctx_handle_buf_done_fail_log(
    uint64_t request_id, struct cam_isp_ctx_req *req_isp,
    uint32_t isp_device_type)
{
    int i;
    const char *handle_type;

    if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
        CAM_ERR(CAM_ISP,
            "Num resources exceed max %d >= %d",
            req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
        return;
    }

    CAM_WARN_RATE_LIMIT(CAM_ISP,
        "Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
        request_id, req_isp->num_fence_map_out, req_isp->num_acked,
        req_isp->bubble_report, req_isp->bubble_detected);
    CAM_WARN_RATE_LIMIT(CAM_ISP,
        "Resource Handles that fail to generate buf_done in prev frame");

    for (i = 0; i < req_isp->num_fence_map_out; i++) {
        if (req_isp->fence_map_out[i].sync_id != -1) {
            handle_type = __cam_isp_resource_handle_id_to_type(
                isp_device_type, req_isp->fence_map_out[i].resource_handle);
            trace_cam_log_event("Buf_done Congestion",
                handle_type, request_id, req_isp->fence_map_out[i].sync_id);
            CAM_WARN_RATE_LIMIT(CAM_ISP,
                "Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
                handle_type,
                req_isp->fence_map_out[i].resource_handle,
                req_isp->fence_map_out[i].sync_id);
        }
    }
}
static void __cam_isp_context_reset_internal_recovery_params(
    struct cam_isp_context *ctx_isp)
{
    atomic_set(&ctx_isp->internal_recovery_set, 0);
    atomic_set(&ctx_isp->process_bubble, 0);
    ctx_isp->recovery_req_id = 0;
    ctx_isp->aeb_error_cnt = 0;
    ctx_isp->bubble_frame_cnt = 0;
}
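/*
 * __cam_isp_context_try_internal_recovery()
 *
 * Attempts bubble-based recovery for recovery_req_id by looking for
 * it at the head of the wait list first and the pending list second.
 * On success the substate moves to BUBBLE; if bubble reporting fails,
 * or the request is in neither list, all internal recovery params are
 * reset.
 */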
static int __cam_isp_context_try_internal_recovery(
    struct cam_isp_context *ctx_isp)
{
    int rc = 0;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_ctx_request *req;
    struct cam_isp_ctx_req *req_isp;

    /*
     * Start with the wait list. If recovery is still set, the errored
     * request has not been moved to the pending list yet, and buf done
     * for it has not occurred - recover from here.
     */
    if (!list_empty(&ctx->wait_req_list)) {
        req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
        req_isp = (struct cam_isp_ctx_req *)req->req_priv;
        if (req->request_id == ctx_isp->recovery_req_id) {
            rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
                CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
            if (rc) {
                /* Unable to do bubble recovery, reset back to normal */
                CAM_WARN(CAM_ISP,
                    "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
                    req->request_id, ctx->ctx_id, ctx->link_hdl);
                __cam_isp_context_reset_internal_recovery_params(ctx_isp);
                req_isp->bubble_detected = false;
                goto end;
            }

            list_del_init(&req->list);
            list_add(&req->list, &ctx->pending_req_list);
            ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
            CAM_INFO(CAM_ISP,
                "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
                ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
            goto end;
        }
    }

    /*
     * If not in the wait list, the only other possibility is that the
     * request is in the pending list. On error detection, bubble detect
     * is set; assuming a new frame comes in after detection, there is a
     * reg update, the request moves to the active list and finishes
     * with its buf dones.
     */
    if (!list_empty(&ctx->pending_req_list)) {
        req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
        req_isp = (struct cam_isp_ctx_req *)req->req_priv;
        if (req->request_id == ctx_isp->recovery_req_id) {
            rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
                CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
            if (rc) {
                /* Unable to do bubble recovery, reset back to normal */
                CAM_WARN(CAM_ISP,
                    "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
                    req->request_id, ctx->ctx_id, ctx->link_hdl);
                __cam_isp_context_reset_internal_recovery_params(ctx_isp);
                req_isp->bubble_detected = false;
                goto end;
            }

            ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
            CAM_INFO(CAM_ISP,
                "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
                ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
            goto end;
        }
    }

    /* If the request is not found in either of the lists, skip recovery */
    __cam_isp_context_reset_internal_recovery_params(ctx_isp);

end:
    return rc;
}
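/*
 * __cam_isp_ctx_handle_buf_done_for_req_list()
 *
 * Final bookkeeping once all fences of a request are acked: moves the
 * request back to the pending list for bubble re-application, or to
 * the free list (signalling fences as errors if the request was
 * already flushed), emits the SOF timestamp if needed, and kicks
 * internal recovery once the active list drains.
 */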
static int __cam_isp_ctx_handle_buf_done_for_req_list(
    struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *req)
{
    int rc = 0, i;
    uint64_t buf_done_req_id;
    struct cam_isp_ctx_req *req_isp;
    struct cam_context *ctx = ctx_isp->base;

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;
    ctx_isp->active_req_cnt--;
    buf_done_req_id = req->request_id;

    if (req_isp->bubble_detected && req_isp->bubble_report) {
        req_isp->num_acked = 0;
        req_isp->num_deferred_acks = 0;
        req_isp->bubble_detected = false;
        list_del_init(&req->list);
        atomic_set(&ctx_isp->process_bubble, 0);
        req_isp->cdm_reset_before_apply = false;
        ctx_isp->bubble_frame_cnt = 0;

        if (buf_done_req_id <= ctx->last_flush_req) {
            for (i = 0; i < req_isp->num_fence_map_out; i++)
                rc = cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_ERROR,
                    CAM_SYNC_ISP_EVENT_BUBBLE);
            list_add_tail(&req->list, &ctx->free_req_list);
            CAM_DBG(CAM_REQ,
                "Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
                buf_done_req_id, ctx_isp->active_req_cnt,
                ctx->ctx_id);
            ctx_isp->last_bufdone_err_apply_req_id = 0;
        } else {
            list_add(&req->list, &ctx->pending_req_list);
            CAM_DBG(CAM_REQ,
                "Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
                req->request_id, ctx_isp->active_req_cnt,
                ctx->ctx_id);
        }
    } else {
        if (!ctx_isp->use_frame_header_ts) {
            if (ctx_isp->reported_req_id < buf_done_req_id) {
                ctx_isp->reported_req_id = buf_done_req_id;
                __cam_isp_ctx_send_sof_timestamp(ctx_isp,
                    buf_done_req_id,
                    CAM_REQ_MGR_SOF_EVENT_SUCCESS);
            }
        }
        list_del_init(&req->list);
        list_add_tail(&req->list, &ctx->free_req_list);
        req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
        req_isp->cdm_reset_before_apply = false;
        req_isp->num_acked = 0;
        req_isp->num_deferred_acks = 0;
        /*
         * Only update the process_bubble and bubble_frame_cnt
         * when bubble is detected on this req, in case the other
         * request is processing bubble.
         */
        if (req_isp->bubble_detected) {
            atomic_set(&ctx_isp->process_bubble, 0);
            ctx_isp->bubble_frame_cnt = 0;
            req_isp->bubble_detected = false;
        }

        CAM_DBG(CAM_REQ,
            "Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
            buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
        ctx_isp->req_info.last_bufdone_req_id = req->request_id;
        ctx_isp->last_bufdone_err_apply_req_id = 0;
    }

    if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
        __cam_isp_context_try_internal_recovery(ctx_isp);

    cam_cpas_notify_event("IFE BufDone", buf_done_req_id);

    __cam_isp_ctx_update_state_monitor_array(ctx_isp,
        CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
    __cam_isp_ctx_update_event_record(ctx_isp,
        CAM_ISP_CTX_EVENT_BUFDONE, req);
    return rc;
}
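/*
 * __cam_isp_ctx_handle_buf_done_for_request()
 *
 * Matches each done resource handle against the request's output
 * fence map and signals fences as success, or as error when a bubble
 * is detected without bubble reporting. Handles that do not belong to
 * this request are returned in done_next_req so the caller can retry
 * them against the next active request.
 */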
static int __cam_isp_ctx_handle_buf_done_for_request(
    struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *req,
    struct cam_isp_hw_done_event_data *done,
    uint32_t bubble_state,
    struct cam_isp_hw_done_event_data *done_next_req)
{
    int rc = 0;
    int i, j;
    struct cam_isp_ctx_req *req_isp;
    struct cam_context *ctx = ctx_isp->base;
    const char *handle_type;

    trace_cam_buf_done("ISP", ctx, req);

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
        bubble_state, req_isp->bubble_detected);

    done_next_req->num_handles = 0;
    done_next_req->timestamp = done->timestamp;

    for (i = 0; i < done->num_handles; i++) {
        for (j = 0; j < req_isp->num_fence_map_out; j++) {
            if (done->resource_handle[i] ==
                req_isp->fence_map_out[j].resource_handle)
                break;
        }

        if (j == req_isp->num_fence_map_out) {
            /*
             * If not found in the current request, it could belong
             * to the next request. This can happen if an IRQ delay
             * occurs. It is only valid when the platform doesn't
             * have a last consumed address.
             */
            CAM_WARN(CAM_ISP,
                "BUF_DONE for res %s not found in Req %lld ",
                __cam_isp_resource_handle_id_to_type(
                    ctx_isp->isp_device_type,
                    done->resource_handle[i]),
                req->request_id);
            done_next_req->resource_handle
                [done_next_req->num_handles++] =
                done->resource_handle[i];
            continue;
        }

        if (req_isp->fence_map_out[j].sync_id == -1) {
            handle_type =
                __cam_isp_resource_handle_id_to_type(
                    ctx_isp->isp_device_type,
                    req_isp->fence_map_out[j].resource_handle);
            CAM_WARN(CAM_ISP,
                "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
                req->request_id, i, j, handle_type);
            trace_cam_log_event("Duplicate BufDone",
                handle_type, req->request_id, ctx->ctx_id);
            done_next_req->resource_handle
                [done_next_req->num_handles++] =
                done->resource_handle[i];
            continue;
        }

        /* Get buf handles from packet and retrieve them from presil framework */
        if (cam_presil_mode_enabled()) {
            rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
                ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
            if (rc) {
                CAM_ERR(CAM_ISP,
                    "Failed to retrieve image buffers req_id:%llu ctx_id:%u bubble detected:%d rc:%d",
                    req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
                return rc;
            }
        }

        if (!req_isp->bubble_detected) {
            CAM_DBG(CAM_ISP,
                "Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
                req->request_id,
                req_isp->fence_map_out[j].resource_handle,
                req_isp->fence_map_out[j].sync_id,
                ctx->ctx_id);

            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                CAM_SYNC_STATE_SIGNALED_SUCCESS,
                CAM_SYNC_COMMON_EVENT_SUCCESS);
            if (rc)
                CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
                    rc);
        } else if (!req_isp->bubble_report) {
            CAM_DBG(CAM_ISP,
                "Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
                req->request_id,
                req_isp->fence_map_out[j].resource_handle,
                req_isp->fence_map_out[j].sync_id,
                ctx->ctx_id);

            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                CAM_SYNC_STATE_SIGNALED_ERROR,
                CAM_SYNC_ISP_EVENT_BUBBLE);
            if (rc)
                CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
                    rc);
        } else {
            /*
             * Ignore the buffer done if bubble detect is on.
             * Increment the ack number here, and queue the
             * request back to the pending list whenever all the
             * buffers are done.
             */
            req_isp->num_acked++;
            CAM_DBG(CAM_ISP,
                "buf done with bubble state %d recovery %d for req %lld, ctx %u",
                bubble_state,
                req_isp->bubble_report,
                req->request_id,
                ctx->ctx_id);
            continue;
        }

        CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
            req->request_id,
            req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
        if (!rc) {
            req_isp->num_acked++;
            req_isp->fence_map_out[j].sync_id = -1;
        }

        if ((ctx_isp->use_frame_header_ts) &&
            (req_isp->hw_update_data.frame_header_res_id ==
            req_isp->fence_map_out[j].resource_handle))
            __cam_isp_ctx_send_sof_timestamp_frame_header(
                ctx_isp,
                req_isp->hw_update_data.frame_header_cpu_addr,
                req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
    }

    if (req_isp->num_acked > req_isp->num_fence_map_out) {
        /* Should not happen */
        CAM_ERR(CAM_ISP,
            "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
            req->request_id, req_isp->num_acked,
            req_isp->num_fence_map_out, ctx->ctx_id);
        WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
    }

    if (req_isp->num_acked != req_isp->num_fence_map_out)
        return rc;

    rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
    return rc;
}
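/*
 * __cam_isp_handle_deferred_buf_done()
 *
 * Replays buf_done acks that were deferred while the request sat in
 * the wait list. During bubble handling, fences are only counted
 * (num_acked); otherwise each deferred fence is signalled with the
 * given status/event cause.
 */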
static int __cam_isp_handle_deferred_buf_done(
    struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *req,
    bool bubble_handling,
    uint32_t status, uint32_t event_cause)
{
    int i, j;
    int rc = 0;
    struct cam_isp_ctx_req *req_isp =
        (struct cam_isp_ctx_req *) req->req_priv;
    struct cam_context *ctx = ctx_isp->base;

    CAM_DBG(CAM_ISP,
        "ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
        ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
        req_isp->num_acked, bubble_handling);

    for (i = 0; i < req_isp->num_deferred_acks; i++) {
        j = req_isp->deferred_fence_map_index[i];

        CAM_DBG(CAM_ISP,
            "ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
            ctx->ctx_id, status, event_cause,
            req->request_id,
            req_isp->fence_map_out[j].resource_handle,
            req_isp->fence_map_out[j].sync_id);

        if (req_isp->fence_map_out[j].sync_id == -1) {
            CAM_WARN(CAM_ISP,
                "ctx[%d] Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
                ctx->ctx_id, req->request_id, j,
                req_isp->fence_map_out[j].resource_handle);
            continue;
        }

        if (!bubble_handling) {
            CAM_WARN(CAM_ISP,
                "Unexpected Buf done for res=0x%x on ctx[%d] for Req %llu, status=%d, possible bh delays",
                req_isp->fence_map_out[j].resource_handle, ctx->ctx_id,
                req->request_id, status);

            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                status, event_cause);
            if (rc) {
                CAM_ERR(CAM_ISP,
                    "ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
                    ctx->ctx_id, req->request_id,
                    req_isp->fence_map_out[j].sync_id,
                    status, rc);
            } else {
                req_isp->num_acked++;
                req_isp->fence_map_out[j].sync_id = -1;
            }
        } else {
            req_isp->num_acked++;
        }
    }

    CAM_DBG(CAM_ISP,
        "ctx[%d] : Req %llu : Handled %d deferred buf_dones num_acked=%d, num_fence_map_out=%d",
        ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
        req_isp->num_acked, req_isp->num_fence_map_out);

    req_isp->num_deferred_acks = 0;

    return rc;
}
static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble(
    struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *req)
{
    int rc = 0;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_ctx_req *req_isp;

    req_isp = (struct cam_isp_ctx_req *)req->req_priv;
    if (req_isp->num_deferred_acks)
        rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req,
            req_isp->bubble_report,
            CAM_SYNC_STATE_SIGNALED_ERROR,
            CAM_SYNC_ISP_EVENT_BUBBLE);

    if (req_isp->num_acked > req_isp->num_fence_map_out) {
        /* Should not happen */
        CAM_ERR(CAM_ISP,
            "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
            req->request_id, req_isp->num_acked,
            req_isp->num_fence_map_out, ctx->ctx_id);
        WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
    }

    if (req_isp->num_acked == req_isp->num_fence_map_out)
        rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);

    return rc;
}
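/*
 * __cam_isp_ctx_handle_buf_done_for_request_verify_addr()
 *
 * Variant of the buf_done handler that can additionally match on the
 * last consumed address and, for requests still in the wait/pending
 * lists, defer fence signalling (defer_buf_done) until it is known
 * whether the request will enter bubble state. Unmatched handles are
 * forwarded to the deferred-done checker.
 */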
static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
    struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *req,
    struct cam_isp_hw_done_event_data *done,
    uint32_t bubble_state,
    bool verify_consumed_addr,
    bool defer_buf_done)
{
    int rc = 0;
    int i, j;
    struct cam_isp_ctx_req *req_isp;
    struct cam_context *ctx = ctx_isp->base;
    const char *handle_type;
    uint32_t cmp_addr = 0;
    struct cam_isp_hw_done_event_data unhandled_done = {0};

    trace_cam_buf_done("ISP", ctx, req);

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
        bubble_state, req_isp->bubble_detected);

    if (done->num_handles > CAM_NUM_OUT_PER_COMP_IRQ_MAX) {
        CAM_ERR(CAM_ISP, "ctx: %u req: %llu num_handles: %u is more than %u",
            ctx->ctx_id, req->request_id,
            done->num_handles, CAM_NUM_OUT_PER_COMP_IRQ_MAX);
        return -EINVAL;
    }

    unhandled_done.timestamp = done->timestamp;

    for (i = 0; i < done->num_handles; i++) {
        for (j = 0; j < req_isp->num_fence_map_out; j++) {
            cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
                req_isp->fence_map_out[j].image_buf_addr[0]) :
                req_isp->fence_map_out[j].image_buf_addr[0];
            if (verify_consumed_addr && (done->last_consumed_addr[i] != cmp_addr))
                continue;

            if (done->resource_handle[i] ==
                req_isp->fence_map_out[j].resource_handle)
                break;
        }

        if (j == req_isp->num_fence_map_out) {
            /*
             * If not found in the current request, it could belong
             * to the next request. This can happen if an IRQ delay
             * occurs. It is only valid when the platform doesn't
             * have a last consumed address.
             */
            CAM_DBG(CAM_ISP,
                "BUF_DONE for res %s not found in Req %lld ",
                __cam_isp_resource_handle_id_to_type(
                    ctx_isp->isp_device_type, done->resource_handle[i]),
                req->request_id);
            unhandled_done.resource_handle[unhandled_done.num_handles] =
                done->resource_handle[i];
            unhandled_done.last_consumed_addr[unhandled_done.num_handles] =
                done->last_consumed_addr[i];
            unhandled_done.num_handles++;
            continue;
        }

        if (req_isp->fence_map_out[j].sync_id == -1) {
            handle_type = __cam_isp_resource_handle_id_to_type(
                ctx_isp->isp_device_type,
                req_isp->fence_map_out[j].resource_handle);
            CAM_WARN(CAM_ISP,
                "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
                req->request_id, i, j, handle_type);
            trace_cam_log_event("Duplicate BufDone",
                handle_type, req->request_id, ctx->ctx_id);
            continue;
        }

        /* Get buf handles from packet and retrieve them from presil framework */
        if (cam_presil_mode_enabled()) {
            rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
                ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
            if (rc) {
                CAM_ERR(CAM_ISP,
                    "Failed to retrieve image buffers req_id:%llu ctx_id:%u bubble detected:%d rc:%d",
                    req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
                return rc;
            }
        }

        if (defer_buf_done) {
            uint32_t deferred_indx = req_isp->num_deferred_acks;

            /*
             * If we are handling this BUF_DONE event for a request
             * that is still in the wait_list, do not signal now -
             * instead mark it as done and handle it later. If this
             * request goes into BUBBLE state later, it will
             * automatically be re-applied; if not, the fences are
             * signalled then. Note: we come here only if the last
             * consumed address matches this port's buffer.
             */
            req_isp->deferred_fence_map_index[deferred_indx] = j;
            req_isp->num_deferred_acks++;
            CAM_DBG(CAM_ISP,
                "ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
                ctx->ctx_id, req->request_id, bubble_state,
                req_isp->bubble_report);
            CAM_DBG(CAM_ISP,
                "ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
                ctx->ctx_id, req_isp->num_deferred_acks, j,
                req_isp->fence_map_out[j].resource_handle,
                req_isp->fence_map_out[j].sync_id);
            continue;
        } else if (!req_isp->bubble_detected) {
            CAM_DBG(CAM_ISP,
                "Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
                req->request_id,
                req_isp->fence_map_out[j].resource_handle,
                req_isp->fence_map_out[j].sync_id,
                ctx->ctx_id);

            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                CAM_SYNC_STATE_SIGNALED_SUCCESS,
                CAM_SYNC_COMMON_EVENT_SUCCESS);
            if (rc) {
                CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
                    req_isp->fence_map_out[j].sync_id, req->request_id, rc);
            } else if (req_isp->num_deferred_acks) {
                /* Process deferred buf_done acks */
                __cam_isp_handle_deferred_buf_done(ctx_isp,
                    req, false,
                    CAM_SYNC_STATE_SIGNALED_SUCCESS,
                    CAM_SYNC_COMMON_EVENT_SUCCESS);
            }
            /* Reset fence */
            req_isp->fence_map_out[j].sync_id = -1;
        } else if (!req_isp->bubble_report) {
            CAM_DBG(CAM_ISP,
                "Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
                req->request_id,
                req_isp->fence_map_out[j].resource_handle,
                req_isp->fence_map_out[j].sync_id,
                ctx->ctx_id);

            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                CAM_SYNC_STATE_SIGNALED_ERROR,
                CAM_SYNC_ISP_EVENT_BUBBLE);
            if (rc) {
                CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
                    req_isp->fence_map_out[j].sync_id, req->request_id, rc);
            } else if (req_isp->num_deferred_acks) {
                /* Process deferred buf_done acks */
                __cam_isp_handle_deferred_buf_done(ctx_isp, req,
                    false,
                    CAM_SYNC_STATE_SIGNALED_ERROR,
                    CAM_SYNC_ISP_EVENT_BUBBLE);
            }
            /* Reset fence */
            req_isp->fence_map_out[j].sync_id = -1;
        } else {
            /*
             * Ignore the buffer done if bubble detect is on.
             * Increment the ack number here, and queue the
             * request back to the pending list whenever all the
             * buffers are done.
             */
            req_isp->num_acked++;
            CAM_DBG(CAM_ISP,
                "buf done with bubble state %d recovery %d for req %lld, ctx %u",
                bubble_state,
                req_isp->bubble_report,
                req->request_id,
                ctx->ctx_id);
            /* Process deferred buf_done acks */
            if (req_isp->num_deferred_acks)
                __cam_isp_handle_deferred_buf_done(ctx_isp, req,
                    true,
                    CAM_SYNC_STATE_SIGNALED_ERROR,
                    CAM_SYNC_ISP_EVENT_BUBBLE);

            if (req_isp->num_acked == req_isp->num_fence_map_out) {
                rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
                if (rc)
                    CAM_ERR(CAM_ISP,
                        "Error in buf done for req = %llu with rc = %d",
                        req->request_id, rc);
                return rc;
            }
            continue;
        }

        CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
            req->request_id,
            req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
        if (!rc)
            req_isp->num_acked++;

        if ((ctx_isp->use_frame_header_ts) &&
            (req_isp->hw_update_data.frame_header_res_id ==
            req_isp->fence_map_out[j].resource_handle))
            __cam_isp_ctx_send_sof_timestamp_frame_header(
                ctx_isp,
                req_isp->hw_update_data.frame_header_cpu_addr,
                req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
    }

    if ((unhandled_done.num_handles > 0) && (!defer_buf_done))
        __cam_isp_ctx_check_deferred_buf_done(
            ctx_isp, &unhandled_done, bubble_state);

    if (req_isp->num_acked > req_isp->num_fence_map_out) {
        /* Should not happen */
        CAM_ERR(CAM_ISP,
            "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
            req->request_id, req_isp->num_acked,
            req_isp->num_fence_map_out, ctx->ctx_id);
    }

    if (req_isp->num_acked != req_isp->num_fence_map_out)
        return rc;

    rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
    return rc;
}
static int __cam_isp_ctx_handle_buf_done(
    struct cam_isp_context *ctx_isp,
    struct cam_isp_hw_done_event_data *done,
    uint32_t bubble_state)
{
    int rc = 0;
    struct cam_ctx_request *req;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_hw_done_event_data done_next_req;

    if (list_empty(&ctx->active_req_list)) {
        CAM_WARN(CAM_ISP, "Buf done with no active request");
        return 0;
    }

    req = list_first_entry(&ctx->active_req_list,
        struct cam_ctx_request, list);

    rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
        bubble_state, &done_next_req);

    if (done_next_req.num_handles) {
        struct cam_isp_hw_done_event_data unhandled_res;
        struct cam_ctx_request *next_req = list_last_entry(
            &ctx->active_req_list, struct cam_ctx_request, list);

        if (next_req->request_id != req->request_id) {
            /*
             * A few resource handles are already signalled in the
             * current request; check if another request is waiting
             * for these resources. This can happen if some of the
             * next request's buf done events are handled before the
             * current request's remaining buf dones due to IRQ
             * scheduling. Only one more request needs checking, as
             * there are at most 2 requests in the active list at
             * any time.
             */
            CAM_WARN(CAM_ISP,
                "Unhandled buf done resources for req %lld, trying next request %lld in active_list",
                req->request_id, next_req->request_id);

            __cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
                next_req, &done_next_req,
                bubble_state, &unhandled_res);

            if (unhandled_res.num_handles == 0)
                CAM_INFO(CAM_ISP,
                    "BUF Done event handled for next request %lld",
                    next_req->request_id);
            else
                CAM_ERR(CAM_ISP,
                    "BUF Done not handled for next request %lld",
                    next_req->request_id);
        } else {
            CAM_WARN(CAM_ISP,
                "Req %lld only active request, spurious buf_done rxd",
                req->request_id);
        }
    }

    return rc;
}
static void __cam_isp_ctx_buf_done_match_req(
    struct cam_ctx_request *req,
    struct cam_isp_hw_done_event_data *done,
    bool *irq_delay_detected)
{
    int i, j;
    uint32_t match_count = 0;
    struct cam_isp_ctx_req *req_isp;
    uint32_t cmp_addr = 0;

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    for (i = 0; i < done->num_handles; i++) {
        for (j = 0; j < req_isp->num_fence_map_out; j++) {
            cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
                req_isp->fence_map_out[j].image_buf_addr[0]) :
                req_isp->fence_map_out[j].image_buf_addr[0];
            if ((done->resource_handle[i] ==
                req_isp->fence_map_out[j].resource_handle) &&
                (done->last_consumed_addr[i] == cmp_addr)) {
                match_count++;
                break;
            }
        }
    }

    if (match_count > 0)
        *irq_delay_detected = true;
    else
        *irq_delay_detected = false;

    CAM_DBG(CAM_ISP,
        "buf done num handles %d match count %d for next req:%lld",
        done->num_handles, match_count, req->request_id);
    CAM_DBG(CAM_ISP,
        "irq_delay_detected %d", *irq_delay_detected);
}
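/*
 * __cam_isp_ctx_try_buf_done_process_for_active_request()
 *
 * After new deferred acks have landed on a wait/pending request,
 * scans the first active request for the same resources: if such a
 * fence is still valid, the earlier buf_done for it was evidently
 * delayed, so it is signalled (unless in bubble) and acked here.
 */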
static void __cam_isp_ctx_try_buf_done_process_for_active_request(
    uint32_t deferred_ack_start_idx, struct cam_isp_context *ctx_isp,
    struct cam_ctx_request *deferred_req)
{
    int i, j, deferred_map_idx, rc;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_ctx_request *curr_active_req;
    struct cam_isp_ctx_req *curr_active_isp_req;
    struct cam_isp_ctx_req *deferred_isp_req;

    if (list_empty(&ctx->active_req_list))
        return;

    curr_active_req = list_first_entry(&ctx->active_req_list,
        struct cam_ctx_request, list);
    curr_active_isp_req = (struct cam_isp_ctx_req *)curr_active_req->req_priv;
    deferred_isp_req = (struct cam_isp_ctx_req *)deferred_req->req_priv;

    /* Check from newly updated deferred acks */
    for (i = deferred_ack_start_idx; i < deferred_isp_req->num_deferred_acks; i++) {
        deferred_map_idx = deferred_isp_req->deferred_fence_map_index[i];

        for (j = 0; j < curr_active_isp_req->num_fence_map_out; j++) {
            /* resource needs to match */
            if (curr_active_isp_req->fence_map_out[j].resource_handle !=
                deferred_isp_req->fence_map_out[deferred_map_idx].resource_handle)
                continue;

            /* Check if fence is valid */
            if (curr_active_isp_req->fence_map_out[j].sync_id == -1)
                break;

            CAM_WARN(CAM_ISP,
                "Processing delayed buf done req: %llu bubble_detected: %s res: 0x%x fd: 0x%x, ctx: %u [deferred req: %llu last applied: %llu]",
                curr_active_req->request_id,
                CAM_BOOL_TO_YESNO(curr_active_isp_req->bubble_detected),
                curr_active_isp_req->fence_map_out[j].resource_handle,
                curr_active_isp_req->fence_map_out[j].sync_id, ctx->ctx_id,
                deferred_req->request_id, ctx_isp->last_applied_req_id);

            /* Signal only if bubble is not detected for this request */
            if (!curr_active_isp_req->bubble_detected) {
                rc = cam_sync_signal(curr_active_isp_req->fence_map_out[j].sync_id,
                    CAM_SYNC_STATE_SIGNALED_SUCCESS,
                    CAM_SYNC_COMMON_EVENT_SUCCESS);
                if (rc)
                    CAM_ERR(CAM_ISP,
                        "Sync: %d for req: %llu failed with rc: %d",
                        curr_active_isp_req->fence_map_out[j].sync_id,
                        curr_active_req->request_id, rc);

                curr_active_isp_req->fence_map_out[j].sync_id = -1;
            }

            curr_active_isp_req->num_acked++;
            break;
        }
    }
}
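/*
 * __cam_isp_ctx_check_deferred_buf_done()
 *
 * Handles a buf done that has no matching active request: tries the
 * head of the wait list, then the pending list, verifying consumed
 * addresses and deferring fence signalling. Any newly deferred acks
 * are then cross-checked against the active request.
 */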
static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	uint32_t curr_num_deferred = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	bool req_in_pending_wait_list = false;

	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;
		req_in_pending_wait_list = true;

		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in wait list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify consumed address for this request to make sure
		 * we are handling the buf_done for the correct
		 * buffer. Also defer actual buf_done handling, i.e.
		 * do not signal the fence as this request may go into
		 * Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	} else if (!list_empty(&ctx->pending_req_list)) {
		/*
		 * We have seen cases where the hw config is blocked for
		 * some reason, and the reg upd and buf done arrive before
		 * the req is added to the wait req list.
		 */
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;
		req_in_pending_wait_list = true;

		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in pending list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify consumed address for this request to make sure
		 * we are handling the buf_done for the correct
		 * buffer. Also defer actual buf_done handling, i.e.
		 * do not signal the fence as this request may go into
		 * Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	}

	if (!req_in_pending_wait_list && (ctx_isp->last_applied_req_id !=
		ctx_isp->last_bufdone_err_apply_req_id)) {
		CAM_DBG(CAM_ISP,
			"Buf done with no active request bubble_state=%d last_applied_req_id:%lld",
			bubble_state, ctx_isp->last_applied_req_id);
		ctx_isp->last_bufdone_err_apply_req_id =
			ctx_isp->last_applied_req_id;
	}

	return rc;
}
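
/*
 * Buf done handling for targets that report the consumed address: match
 * the done event against the first (and, on IRQ delay, also the last)
 * request on the active list by verifying the consumed address before
 * signaling any fence.
 */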
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		return __cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, done, bubble_state);
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If an irq delay isn't detected, verify the consumed address
	 * for the current req; otherwise the done event cannot be
	 * trusted to belong to it.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Always verify the consumed address for the next req, since the
	 * reported buf done event may belong to the current req, in which
	 * case it must not be signaled for the next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}
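
/*
 * Dispatch buf done handling based on whether the target supports
 * consumed address verification.
 */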
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}
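
/*
 * Workqueue callback for offline/vfps contexts: pick the first pending
 * request, move it to the wait list and apply it to the HW. On config
 * failure the request is returned to the pending list and the previously
 * applied request id is restored.
 */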
static int __cam_isp_ctx_apply_pending_req(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg = {0};

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if (ctx_isp->vfps_aux_context) {
		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED)
			goto end;

		if (ctx_isp->active_req_cnt >= 1)
			goto end;
	} else {
		if ((ctx->state != CAM_CTX_ACTIVATED) ||
			(!atomic_read(&ctx_isp->rxd_epoch)) ||
			(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
			goto end;

		if (ctx_isp->active_req_cnt >= 2)
			goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than the
	 * CDM processing returns, so set the substate before applying the
	 * settings.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);

	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);

		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}
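
/* Schedule __cam_isp_ctx_apply_pending_req on the context workq. */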
static int __cam_isp_ctx_schedule_apply_req(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_pending_req;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}
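
/*
 * EPOCH handler for offline contexts: note the received epoch, report the
 * SOF timestamp for the oldest unreported active request and kick off the
 * next pending apply.
 */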
static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	/*
	 * For offline it should not be possible for an epoch to be
	 * generated without RUP done; IRQ scheduling delays can possibly
	 * cause this.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Active list empty on ctx: %u - EPOCH serviced before RUP",
			ctx->ctx_id);
	} else {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}
	}

	__cam_isp_ctx_schedule_apply_req(ctx_isp);

	/*
	 * If there is no valid request, wait for the RUP shutter posted
	 * after buf done.
	 */
	if (request_id)
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}
static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated),
			ctx_isp->frame_id);
	return 0;
}
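
/*
 * Reg update ack in applied state: move the waiting request to the active
 * list (or straight to the free list if it has no output fences) and move
 * the substate to EPOCH.
 */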
static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);
end:
	return rc;
}
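
/*
 * Notify CRM of the SOF trigger and report the SOF timestamp. If a bubble
 * is being processed, also check whether the CDM callback for the bubble
 * request has happened; if not, the CDM is assumed stuck (or delayed) and
 * the request is moved back to the pending list for a re-apply.
 */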
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the SOF signal. Note that due to scheduling
	 * delays we can run into a situation where two requests have
	 * already made it to the active queue while we try to do the
	 * notification. In this case, skip the current notification so
	 * the state machine can catch up on the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		if (request_id != 0)
			ctx_isp->reported_req_id = request_id;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;

	/* notify reqmgr with eof signal */
	rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}
static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	/* First check if there is a valid request in active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing is in the active list, the current request might not
	 * have moved from the wait to the active list yet. This can happen
	 * if REG_UPDATE to sw comes immediately after SOF.
	 */
	if ((request_id == 0) && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);

	return rc;
}
static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}
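
/*
 * EPOCH in applied substate means no buf done was seen for the applied
 * request: treat it as a bubble, optionally report it to CRM, and move
 * the request to the active list so the buf done path can finish it.
 */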
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no wait req at epoch, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;
	atomic_set(&ctx_isp->process_bubble, 1);

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle any deferred buf done after moving
	 * the bubble req to the active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	/*
	 * Update the event record before the req pointer is
	 * reused for another (possibly invalid) req.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Get the req again from the active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (request_id == 0) {
		if (!list_empty(&ctx->active_req_list)) {
			req = list_last_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			__cam_isp_ctx_update_state_monitor_array(ctx_isp,
				CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
				req->request_id);
		}
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
static int __cam_isp_ctx_buf_done_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}
static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	if (list_empty(&ctx->active_req_list)) {
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	} else {
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}
static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
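
/*
 * EPOCH while in bubble applied substate: the reg upd ack was missed, so
 * the newly applied request is marked as a bubble as well and the context
 * transitions back to the BUBBLE substate.
 */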
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * This means we missed the reg upd ack, so we need to
	 * transition to BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req at epoch, this is an error
		 * case. Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle any deferred buf done after moving
	 * the bubble req to the active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	if (!req_isp->bubble_detected) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
			list);
		req_isp->bubble_detected = true;
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
	}

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}
static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
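
/*
 * Map an ISP HW error bitmask to the fence event cause, the req mgr error
 * code and the recovery type to be sent out. When both full and partial
 * recovery are indicated, full recovery wins.
 */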
static void get_notification_evt_params(uint32_t hw_error, uint32_t *fence_evt_cause,
	uint32_t *req_mgr_err_code, uint32_t *recovery_type)
{
	uint32_t err_type, err_code = 0, recovery_type_temp;

	err_type = CAM_SYNC_ISP_EVENT_UNKNOWN;
	recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;

	if (hw_error & CAM_ISP_HW_ERROR_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_OUTPUT_FIFO_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_RECOVERY_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_P2I_ERROR) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_P2I_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_VIOLATION) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_VIOLATION;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR) {
		err_code |= CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING;
		err_type = CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME) {
		err_code |= CAM_REQ_MGR_CSID_UNBOUNDED_FRAME;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_FRAME_SIZE) {
		err_code |= CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_EOT) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_EOT;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (recovery_type_temp == (CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY |
		CAM_REQ_MGR_ERROR_TYPE_RECOVERY))
		recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;

	if (!err_code)
		err_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR;

	*req_mgr_err_code = err_code;
	*fence_evt_cause = err_type;
	*recovery_type = recovery_type_temp;
}
static bool __cam_isp_ctx_request_can_reapply(
	struct cam_isp_ctx_req *req_isp)
{
	int i;

	for (i = 0; i < req_isp->num_fence_map_out; i++)
		if (req_isp->fence_map_out[i].sync_id == -1)
			return false;

	return true;
}
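
/*
 * Internal recovery is only attempted if no fence of any active request
 * has been signaled yet; otherwise buffer and timestamp consistency
 * cannot be guaranteed. On success, all active and wait requests are
 * queued back to the pending list in order.
 */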
static int __cam_isp_ctx_validate_for_req_reapply_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_ctx_request *req_temp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Check for req in active/wait lists */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"Active request list empty for ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		if (list_empty(&ctx->wait_req_list)) {
			CAM_WARN(CAM_ISP,
				"No active/wait req for ctx: %u on link: 0x%x",
				ctx->ctx_id, ctx->link_hdl);
			rc = -EINVAL;
			goto end;
		}
	}

	/* Validate that no fences for active requests have been signaled */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			/*
			 * If some fences of the active request are already
			 * signaled, we should not do recovery, for buffer
			 * and timestamp consistency.
			 */
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			if (!__cam_isp_ctx_request_can_reapply(req_isp)) {
				CAM_WARN(CAM_ISP,
					"Req: %llu in ctx:%u on link: 0x%x fences have partially signaled, cannot do recovery",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	/* Move active requests to the pending list */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			ctx_isp->active_req_cnt--;
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move active req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}

	/* Move wait requests to the pending list */
	if (!list_empty(&ctx->wait_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp, &ctx->wait_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move wait req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}
end:
	return rc;
}
static int __cam_isp_ctx_handle_recovery_req_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req_to_reapply = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	req_to_reapply = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *)req_to_reapply->req_priv;
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	ctx_isp->recovery_req_id = req_to_reapply->request_id;
	atomic_set(&ctx_isp->internal_recovery_set, 1);

	CAM_INFO(CAM_ISP, "Notify CRM to reapply req:%llu for ctx:%u link:0x%x",
		req_to_reapply->request_id, ctx->ctx_id, ctx->link_hdl);

	rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
		CRM_KMD_WARN_INTERNAL_RECOVERY, req_to_reapply->request_id,
		ctx_isp);
	if (rc) {
		/* Unable to notify CRM to do reapply; reset back to normal */
		CAM_WARN(CAM_ISP,
			"ctx:%u unable to notify CRM for req %llu",
			ctx->ctx_id, ctx_isp->recovery_req_id);
		ctx_isp->recovery_req_id = 0;
		atomic_set(&ctx_isp->internal_recovery_set, 0);
	}

	return rc;
}
static int __cam_isp_ctx_trigger_error_req_reapply(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;

	/*
	 * For errors that can be recoverable within kmd, we
	 * try to do internal hw stop, restart and notify CRM
	 * to do reapply with the help of bubble control flow.
	 */
	rc = __cam_isp_ctx_validate_for_req_reapply_util(ctx_isp);
	if (rc)
		goto end;

	rc = __cam_isp_ctx_handle_recovery_req_util(ctx_isp);
	if (rc)
		goto end;

	CAM_DBG(CAM_ISP, "Triggered internal recovery for req:%llu ctx:%u on link 0x%x",
		ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
end:
	return rc;
}
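
/*
 * Central HW error handler: optionally attempt internal recovery, dump
 * the offending request, signal error on the fences of requests that do
 * not have bubble recovery enabled, and either notify CRM of a bubble or
 * report a fatal error event to userland.
 */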
static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	enum cam_req_mgr_device_error error;
	uint32_t i = 0;
	bool found = false;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_to_report = NULL;
	struct cam_ctx_request *req_to_dump = NULL;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_ctx_req *req_isp_to_report = NULL;
	uint64_t error_request_id;
	struct cam_hw_fence_map_entry *fence_map_out = NULL;
	uint32_t recovery_type, fence_evt_cause;
	uint32_t req_mgr_err_code;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;

	CAM_DBG(CAM_ISP, "Enter HW error_type = %d", error_event_data->error_type);

	if (error_event_data->try_internal_recovery) {
		rc = __cam_isp_ctx_trigger_error_req_reapply(ctx_isp);
		if (!rc)
			goto exit;
	}

	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	get_notification_evt_params(error_event_data->error_type, &fence_evt_cause,
		&req_mgr_err_code, &recovery_type);

	/*
	 * The error is likely caused by the first request on the active
	 * list. If the active list is empty, check the wait list (the
	 * error may have hit as soon as RUP, i.e. we are handling the
	 * error before RUP).
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"handling error with no active request");
		if (list_empty(&ctx->wait_req_list)) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Error with no active/wait request");
			goto end;
		} else {
			req_to_dump = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
		}
	} else {
		req_to_dump = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	}

	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;

	if (error_event_data->enable_req_dump)
		rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			ctx_isp->active_req_cnt--;
		} else {
			found = true;
			break;
		}
	}

	if (found)
		goto move_to_pending;

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			found = true;
			break;
		}
	}

move_to_pending:
	/*
	 * If bubble recovery is enabled on any request we need to move that
	 * request and all the subsequent requests to the pending list.
	 * Note:
	 * We need to traverse the active list in reverse order and add
	 * to the head of the pending list.
	 * e.g. pending current state: 10, 11 | active current state: 8, 9
	 * intermediate for loop iteration - pending: 9, 10, 11 | active: 8
	 * final state - pending: 8, 9, 10, 11 | active: NULL
	 */
	if (found) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->active_req_cnt--;
		}
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->wait_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
		}
	}

end:
	do {
		if (list_empty(&ctx->pending_req_list)) {
			error_request_id = ctx_isp->last_applied_req_id;
			req_isp = NULL;
			break;
		}
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		error_request_id = ctx_isp->last_applied_req_id;

		if (req_isp->bubble_report) {
			req_to_report = req;
			req_isp_to_report = req_to_report->req_priv;
			break;
		}

		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					fence_evt_cause);
			req_isp->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	error = CRM_KMD_ERR_FATAL;
	if (req_isp_to_report && req_isp_to_report->bubble_report &&
		error_event_data->recovery_enabled)
		error = CRM_KMD_ERR_BUBBLE;

	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error,
		error_request_id, ctx_isp);

	/*
	 * The error that occurred in the KMD also needs to be sent out;
	 * this helps UMD to take the necessary action and to dump the
	 * relevant info.
	 */
	if (error == CRM_KMD_ERR_FATAL)
		__cam_isp_ctx_notify_v4l2_error_event(recovery_type,
			req_mgr_err_code, error_request_id, ctx);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
	CAM_DBG(CAM_ISP, "Handling error done on ctx: %u", ctx->ctx_id);
exit:
	return rc;
}
static int __cam_isp_ctx_fs2_sof_in_sof_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (!(list_empty(&ctx->wait_req_list)))
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);
end:
	return rc;
}
static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;
	struct cam_context *ctx = ctx_isp->base;
	int prev_active_req_cnt = 0;
	int curr_req_id = 0;
	struct cam_ctx_request *req;

	prev_active_req_cnt = ctx_isp->active_req_cnt;
	if (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		curr_req_id = req->request_id;
	}

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);

	if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
		if (list_empty(&ctx->wait_req_list) &&
			list_empty(&ctx->active_req_list)) {
			CAM_DBG(CAM_ISP, "No request, move to SOF");
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_SOF;
			if (ctx_isp->reported_req_id < curr_req_id) {
				ctx_isp->reported_req_id = curr_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					curr_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
	}

	return rc;
}
static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_applied(
	struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}
static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}
static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	if (req_isp->num_fence_map_out != 1)
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated));
end:
	if (req != NULL && !rc) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
	return rc;
}
static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
	struct cam_isp_context *ctx_isp)
{
	struct cam_context *ctx = ctx_isp->base;

	if ((++ctx_isp->aeb_error_cnt) <= CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX) {
		CAM_WARN(CAM_ISP,
			"AEB slave RDI's current request's SOF seen after next req is applied for ctx: %u on link: 0x%x last_applied_req: %llu err_cnt: %u",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, ctx_isp->aeb_error_cnt);
		return;
	}

	CAM_ERR(CAM_ISP,
		"Fatal - AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height needs to be re-configured for ctx: %u on link: 0x%x err_cnt: %u",
		ctx->ctx_id, ctx->link_hdl, ctx_isp->aeb_error_cnt);

	/* Pause CRM timer */
	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	/* Trigger reg dump */
	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	/* Notify CRM on fatal error */
	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
		ctx_isp->last_applied_req_id, ctx_isp);

	/* Notify userland on error */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
		CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);

	/* Change state to HALT, stop further processing of HW events */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
}
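/*
 * Attempt internal (bubble) recovery for a detected frame drop: pick the
 * request to recover from the wait list (or the pending list for an
 * out-of-sync drop), flag bubble/recovery state, and either wait for
 * outstanding buf dones or report the bubble to CRM immediately.
 */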
static int __cam_isp_ctx_trigger_internal_recovery(
	bool sync_frame_drop, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	bool do_recovery = true;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If the wait list is empty and we encounter a "silent" frame
		 * drop, then the settings applied on the previous frame did not
		 * take effect at the next frame boundary; they are expected to
		 * latch a frame later, so there is no need to recover. If it's
		 * an out-of-sync drop, use the pending request instead.
		 */
		if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);
		else
			do_recovery = false;
	}

	/* If both wait and pending list have no request to recover on */
	if (!do_recovery) {
		CAM_WARN(CAM_ISP,
			"No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
			ctx_isp->req_info.last_bufdone_req_id);
		goto end;
	}

	if (!req) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		if (req->request_id != ctx_isp->last_applied_req_id)
			CAM_WARN(CAM_ISP,
				"Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx->ctx_id, ctx->link_hdl);
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	/*
	 * Treat this as a bubble; after recovery, re-start from the
	 * appropriate sub-state. This will block servicing any further
	 * apply calls from CRM.
	 */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	atomic_set(&ctx_isp->process_bubble, 1);
	ctx_isp->recovery_req_id = req->request_id;

	/* Wait for active requests to finish before issuing recovery */
	if (ctx_isp->active_req_cnt) {
		req_isp->bubble_detected = true;
		CAM_WARN(CAM_ISP,
			"Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
			ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
			ctx->ctx_id, ctx->link_hdl);
	} else {
		rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			ctx_isp->recovery_req_id, ctx_isp);
		if (rc) {
			/* Unable to do bubble recovery, reset back to normal */
			CAM_WARN(CAM_ISP,
				"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			__cam_isp_context_reset_internal_recovery_params(ctx_isp);
			goto end;
		}

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
	}
end:
	return rc;
}
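/*
 * Dispatcher for CSID secondary events (SOF/EPOCH/out-of-sync frame
 * drop), currently supported only for custom AEB usecases. Flags fatal
 * AEB errors and kicks internal recovery when a programming-delay or
 * sensor-sync frame drop is detected.
 */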
static int __cam_isp_ctx_handle_secondary_events(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	bool recover = false, sync_frame_drop = false;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_secondary_event_data *sec_evt_data =
		(struct cam_isp_hw_secondary_event_data *)evt_data;

	/* Current scheme to handle only for custom AEB */
	if (!ctx_isp->aeb_enabled) {
		CAM_WARN(CAM_ISP,
			"Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	/*
	 * In case of custom AEB, ensure the first exposure frame has not
	 * moved forward with its settings without the second/third exposure
	 * frame coming in. Also track for bubble: in case of system delays
	 * it's possible for the IFE settings to not be written to HW on a
	 * given frame. If either scenario occurs, flag it as an error and
	 * recover.
	 */
	switch (sec_evt_data->evt_type) {
	case CAM_ISP_HW_SEC_EVENT_SOF:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
			ctx_isp->last_applied_req_id);

		/* Slave RDI's frame starting post IFE EPOCH - Fatal */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
			__cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
		else
			/* Reset error count */
			ctx_isp->aeb_error_cnt = 0;
		break;
	case CAM_ISP_HW_SEC_EVENT_EPOCH:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
			ctx_isp->last_applied_req_id);

		/*
		 * Master RDI frame dropped in CSID; due to programming delay
		 * there is no RUP/AUP. On such occasions use CSID CAMIF EPOCH
		 * for bubble detection, flag on detection and perform the
		 * necessary bubble recovery.
		 */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
			recover = true;
			CAM_WARN(CAM_ISP,
				"Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
				ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		}
		break;
	case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
			ctx_isp->last_applied_req_id);

		/* Avoid recovery loop if frame is dropped at stream on */
		if (!ctx_isp->frame_id) {
			CAM_ERR(CAM_ISP,
				"Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
				ctx->ctx_id, ctx->link_hdl,
				ctx_isp->frame_id, ctx_isp->last_applied_req_id);
			rc = -EPERM;
			break;
		}

		recover = true;
		sync_frame_drop = true;
		CAM_WARN(CAM_ISP,
			"Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		break;
	default:
		break;
	}

	if (recover && ctx_isp->do_internal_recovery)
		rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);
end:
	return rc;
}
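/*
 * Per-substate IRQ dispatch tables. Each entry's irq_ops[] is indexed by
 * HW event type - judging by the handler names, in the order: error, SOF,
 * reg update, EPOCH, EOF, buf done, secondary event. A NULL slot means
 * the event is ignored in that substate.
 */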
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_sof,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_fs2_reg_upd_in_sof,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_fs2_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
	},
	/* Bubble Applied */
	{
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
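/*
 * Apply a user-injected HW event (debug/test hook) via the common context
 * layer and invalidate the injection parameters once consumed.
 */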
static inline int cam_isp_context_apply_evt_injection(struct cam_context *ctx)
{
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_hw_inject_evt_param *evt_inject_params = &ctx_isp->evt_inject_params;
	struct cam_common_evt_inject_data inject_evt = {0};
	int rc;

	inject_evt.evt_params = evt_inject_params;
	rc = cam_context_apply_evt_injection(ctx, &inject_evt);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to apply event injection ctx_id: %u req_id: %u",
			ctx->ctx_id, evt_inject_params->req_id);

	evt_inject_params->is_valid = false;

	return rc;
}
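/*
 * Core apply path: validates that the request at the head of the pending
 * list matches CRM's apply, rejects the apply during bubble processing,
 * internal recovery, or congestion (>= 2 active requests), then
 * configures the HW and moves the request to the wait list on success.
 * -EALREADY from hw_config is treated as an immediate bubble.
 */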
static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *active_req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_ctx_req *active_req_isp;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_config_args cfg = {0};

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	if (apply->re_apply)
		if (apply->request_id <= ctx_isp->last_applied_req_id) {
			CAM_INFO_RATE_LIMIT(CAM_ISP,
				"ctx_id:%d Trying to reapply the same request %llu again",
				ctx->ctx_id,
				apply->request_id);
			return 0;
		}

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d No available request for Apply id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has an issue, requests can queue up in the
	 * pipeline. In this case, we should reject any additional request.
	 * The maximum number of requests allowed to be outstanding is 2.
	 */
	if (atomic_read(&ctx_isp->process_bubble)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * While the ISP is processing internal recovery, CRM may still
	 * apply a req to the ISP ctx. In this case, we should reject
	 * this apply.
	 */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing recovery cannot apply Request Id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EAGAIN;
		goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip; if not, this means
	 * we are in the middle of error handling and need to reject this
	 * apply.
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Invalid Request Id asking %llu existing %llu",
			ctx->ctx_id,
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
		req->request_id,
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
		ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (ctx_isp->active_req_cnt >= 2) {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
			req->request_id,
			ctx_isp->active_req_cnt,
			ctx->ctx_id);

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->active_req_list))
			active_req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
		else
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"WARNING: should not happen (cnt = %d) but active_list empty",
				ctx_isp->active_req_cnt);
		spin_unlock_bh(&ctx->lock);

		if (active_req) {
			active_req_isp =
				(struct cam_isp_ctx_req *) active_req->req_priv;
			__cam_isp_ctx_handle_buf_done_fail_log(
				active_req->request_id, active_req_isp,
				ctx_isp->isp_device_type);
		}

		rc = -EFAULT;
		goto end;
	}

	req_isp->bubble_report = apply->report_if_bubble;

	/*
	 * Reset all buf done/bubble flags for the req being applied.
	 * If internal recovery has led to a re-apply of the same
	 * request, clear all stale entities.
	 */
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->cdm_reset_before_apply = false;
	req_isp->bubble_detected = false;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;
	cfg.reapply_type = req_isp->reapply_type;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	if ((ctx_isp->evt_inject_params.is_valid) &&
		(req->request_id == ctx_isp->evt_inject_params.req_id)) {
		rc = cam_isp_context_apply_evt_injection(ctx_isp->base);
		if (!rc)
			goto end;
	}

	atomic_set(&ctx_isp->apply_in_progress, 1);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		ctx_isp->last_applied_req_id = apply->request_id;
		list_del_init(&req->list);

		if (atomic_read(&ctx_isp->internal_recovery_set))
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
		else
			list_add_tail(&req->list, &ctx->wait_req_list);

		CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
			__cam_isp_ctx_substate_val_to_type(next_state),
			ctx_isp->last_applied_req_id);
		spin_unlock_bh(&ctx->lock);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_APPLY, req);
	} else if (rc == -EALREADY) {
		spin_lock_bh(&ctx->lock);
		req_isp->bubble_detected = true;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		spin_unlock_bh(&ctx->lock);
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}

	atomic_set(&ctx_isp->apply_in_progress, 0);
end:
	return rc;
}
static int __cam_isp_ctx_apply_req_in_sof(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_req_in_epoch(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_req_in_bubble(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
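/*
 * Ask the HW manager to program the default configuration; wired up as
 * the CRM notify_frame_skip op in the state machine tables below, i.e.
 * it runs when a frame is skipped with no new request to apply.
 */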
static int __cam_isp_ctx_apply_default_req_settings(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type =
		CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to apply default settings rc %d", rc);
	else
		CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);

	return rc;
}
static void *cam_isp_ctx_user_dump_req_list(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct list_head *head = NULL;
	uint64_t *addr;
	struct cam_ctx_request *req, *req_temp;

	head = (struct list_head *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	if (!list_empty(head)) {
		list_for_each_entry_safe(req, req_temp, head, list) {
			*addr++ = req->request_id;
		}
	}

	return addr;
}
static void *cam_isp_ctx_user_dump_active_requests(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_ctx_request *req;

	req = (struct cam_ctx_request *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;
	return addr;
}
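/*
 * Dump per-request debug info into the user-provided buffer: the request
 * ids on the pending/wait/active lists, followed by the output fence
 * mapping of every active request.
 */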
static int __cam_isp_ctx_dump_req_info(
	struct cam_context *ctx,
	struct cam_ctx_request *req,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	uint32_t min_len;
	size_t remain_len;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req_temp;

	if (!req || !ctx || !dump_args) {
		CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK",
			req, ctx, dump_args);
		return -EINVAL;
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;

	if (dump_args->buf_len <= dump_args->offset) {
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			dump_args->buf_len, dump_args->offset);
		return -ENOSPC;
	}

	remain_len = dump_args->buf_len - dump_args->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
		 req_isp->num_fence_map_out *
		 sizeof(uint64_t));
	if (remain_len < min_len) {
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	/* Dump pending request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->pending_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_PENDING_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Pending request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump applied request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->wait_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_APPLIED_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Applied request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->active_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_ACTIVE_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Active request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request fences */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				rc = cam_common_user_dump_helper(dump_args,
					cam_isp_ctx_user_dump_active_requests,
					req, sizeof(uint64_t),
					"ISP_OUT_FENCE_REQUEST_ACTIVE.%s.%u.%d:",
					__cam_isp_ife_sfe_resource_handle_id_to_type(
						req_isp->fence_map_out[i].resource_handle),
					req_isp->fence_map_out[i].image_buf_addr[0],
					req_isp->fence_map_out[i].sync_id);
				if (rc) {
					CAM_ERR(CAM_ISP,
						"CAM_ISP_CONTEXT DUMP_REQ_INFO: Dump failed, rc: %d",
						rc);
					return rc;
				}
			}
		}
	}

	return rc;
}
static void *cam_isp_ctx_user_dump_timer(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	uint64_t *addr;
	ktime_t cur_time;

	req = (struct cam_ctx_request *)dump_struct;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	cur_time = ktime_get();

	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_sec;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_nsec / NSEC_PER_USEC;
	*addr++ = ktime_to_timespec64(cur_time).tv_sec;
	*addr++ = ktime_to_timespec64(cur_time).tv_nsec / NSEC_PER_USEC;
	return addr;
}
static void *cam_isp_ctx_user_dump_stream_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_context *ctx = NULL;
	int32_t *addr;

	ctx = (struct cam_context *)dump_struct;
	addr = (int32_t *)addr_ptr;

	*addr++ = ctx->ctx_id;
	*addr++ = ctx->dev_hdl;
	*addr++ = ctx->link_hdl;
	return addr;
}
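/*
 * Top-level handler for a CRM dump request: locates the request on the
 * active or wait list, then writes timing info, stream info, the event
 * record, the state monitor array, request info and finally the
 * CSID/VFE/SFE HW dump into the supplied buffer.
 */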
static int __cam_isp_ctx_dump_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_dump_info *dump_info)
{
	int rc = 0;
	bool dump_only_event_record = false;
	size_t buf_len;
	size_t remain_len;
	ktime_t cur_time;
	uint32_t min_len;
	uint64_t diff;
	uintptr_t cpu_addr;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_ctx_request *req_temp;
	struct cam_hw_dump_args ife_dump_args;
	struct cam_common_hw_dump_args dump_args;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	goto end;
hw_dump:
	rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
		&cpu_addr, &buf_len);
	if (rc) {
		CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
			dump_info->buf_handle, rc);
		goto end;
	}
	if (buf_len <= dump_info->offset) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			buf_len, dump_info->offset);
		return -ENOSPC;
	}

	remain_len = buf_len - dump_info->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

	if (remain_len < min_len) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	cur_time = ktime_get();
	diff = ktime_us_delta(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY],
		cur_time);
	if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
		CAM_INFO(CAM_ISP, "req %lld found no error",
			req->request_id);
		dump_only_event_record = true;
	}

	dump_args.req_id = dump_info->req_id;
	dump_args.cpu_addr = cpu_addr;
	dump_args.buf_len = buf_len;
	dump_args.offset = dump_info->offset;
	dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;

	/* Dump time info */
	rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_timer,
		req, sizeof(uint64_t), "ISP_CTX_DUMP:");
	if (rc) {
		CAM_ERR(CAM_ISP, "Time dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	/* Dump stream info */
	ctx->ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx->hw_mgr_intf->hw_dump) {
		/* Dump first part of stream info from isp context */
		rc = cam_common_user_dump_helper(&dump_args,
			cam_isp_ctx_user_dump_stream_info, ctx,
			sizeof(int32_t), "ISP_STREAM_INFO_FROM_CTX:");
		if (rc) {
			CAM_ERR(CAM_ISP, "ISP CTX stream info dump fail %lld, rc: %d",
				req->request_id, rc);
			goto end;
		}

		/* Dump second part of stream info from ife hw manager */
		hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
		isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_DUMP_STREAM_INFO;
		isp_hw_cmd_args.cmd_data = &dump_args;
		hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

		rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
		if (rc) {
			CAM_ERR(CAM_ISP, "IFE HW MGR stream info dump fail %lld, rc: %d",
				req->request_id, rc);
			goto end;
		}
		dump_info->offset = dump_args.offset;
	}

	/* Dump event record */
	rc = __cam_isp_ctx_dump_event_record(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Event record dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	if (dump_only_event_record)
		goto end;

	/* Dump state monitor array */
	rc = __cam_isp_ctx_user_dump_state_monitor_array(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump event fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}

	/* Dump request info */
	rc = __cam_isp_ctx_dump_req_info(ctx, req, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump Req info fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	spin_unlock_bh(&ctx->lock);

	/* Dump CSID, VFE, and SFE info */
	dump_info->offset = dump_args.offset;
	if (ctx->hw_mgr_intf->hw_dump) {
		ife_dump_args.offset = dump_args.offset;
		ife_dump_args.request_id = dump_info->req_id;
		ife_dump_args.buf_handle = dump_info->buf_handle;
		ife_dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_dump(
			ctx->hw_mgr_intf->hw_mgr_priv,
			&ife_dump_args);
		dump_info->offset = ife_dump_args.offset;
	}
	return rc;
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
static int __cam_isp_ctx_flush_req_in_flushed_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	CAM_INFO(CAM_ISP, "Flush (type %d) in flushed state req id %lld ctx_id:%d",
		flush_req->type, flush_req->req_id, ctx->ctx_id);
	if (flush_req->req_id > ctx->last_flush_req)
		ctx->last_flush_req = flush_req->req_id;
	return 0;
}
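/*
 * Flush requests from the given list: for CANCEL_REQ only the matching
 * request is flushed, otherwise the whole list is moved. Output fences
 * of flushed requests are signalled with CANCEL and the requests are
 * returned to the free list.
 */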
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc, tmp = 0;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp;
	struct list_head flush_list;
	struct cam_isp_context *ctx_isp = NULL;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_ISP, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_INFO(CAM_ISP, "no request to cancel (last applied:%lld cancel:%lld)",
				ctx_isp->last_applied_req_id, flush_req->req_id);
			return -EINVAL;
		} else
			return 0;
	}

	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				__cam_isp_ctx_update_state_monitor_array(
					ctx_isp,
					CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
					req->request_id);
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
	}

	if (list_empty(&flush_list)) {
		/*
		 * Maybe the req wasn't sent to KMD since UMD already skipped
		 * the req in the CSL layer.
		 */
		CAM_INFO(CAM_ISP,
			"flush list is empty, flush type %d for req %llu",
			flush_req->type, flush_req->req_id);
		return 0;
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
					req->request_id,
					req_isp->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_FLUSH);
				if (rc) {
					tmp = req_isp->fence_map_out[i].sync_id;
					CAM_ERR_RATE_LIMIT(CAM_ISP,
						"signal fence %d failed", tmp);
				}
				req_isp->fence_map_out[i].sync_id = -1;
			}
		}
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	return 0;
}
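/*
 * Flush in the activated state: always flushes the pending list; for
 * FLUSH_TYPE_ALL it additionally stops and resets the HW, flushes the
 * wait/active lists and moves the context to the FLUSHED state.
 */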
static int __cam_isp_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_hw_reset_args reset_args;
	struct cam_req_mgr_timer_notify timer;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "Flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		if (ctx->state <= CAM_CTX_READY) {
			ctx->state = CAM_CTX_ACQUIRED;
			goto end;
		}

		spin_lock_bh(&ctx->lock);
		ctx->state = CAM_CTX_FLUSHED;
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
		spin_unlock_bh(&ctx->lock);

		CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
			flush_req->req_id, ctx->ctx_id);
		ctx->last_flush_req = flush_req->req_id;

		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx);

		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = true;
		stop_isp.is_internal_stop = false;
		stop_args.args = (void *)&stop_isp;
		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
				rc);

		CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
		CAM_DBG(CAM_ISP, "Flush wait and active lists");

		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
			timer.link_hdl = ctx->link_hdl;
			timer.dev_hdl = ctx->dev_hdl;
			timer.state = false;
			ctx->ctx_crm_intf->notify_timer(&timer);
		}

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->wait_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
				flush_req);
		if (!list_empty(&ctx->active_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
				flush_req);
		ctx_isp->active_req_cnt = 0;
		spin_unlock_bh(&ctx->lock);

		reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
			&reset_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);

		ctx_isp->init_received = false;
	}
end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	return rc;
}
static int __cam_isp_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in the pending req list, change state to acquired */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
static struct cam_ctx_ops
	cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
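/*
 * RDI-only (no PIX path) SOF handling in the SOF substate: updates the
 * SOF timestamp, notifies CRM unless more than two requests are already
 * active, and stays in (or returns to) the SOF substate when the active
 * list is empty.
 */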
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	/*
	 * Notify reqmgr with the SOF signal. Note: due to scheduling delay
	 * we can run into a situation where two active requests are already
	 * in the active queue while we try to do the notification. In this
	 * case, we need to skip the current notification. This helps the
	 * state machine to catch up the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		/*
		 * It's possible for the rup done to be processed before
		 * SOF; check the first active request's shutter here.
		 */
		if (!list_empty(&ctx->active_req_list)) {
			struct cam_ctx_request *req = NULL;

			req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
			}
		}
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
	}

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return rc;
}
static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return 0;
}
static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	/*
	 * SOF in bubble applied state means the reg update was not received.
	 * Before incrementing the frame id and overriding the time stamp
	 * value, send the previous SOF time stamp that was captured in the
	 * SOF-in-applied state.
	 */
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req in epoch, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/* change the state to bubble, as reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
end:
	return 0;
}
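/*
 * RDI-only SOF while in the bubble substate. If a bubble is being
 * processed, checks the last CDM-done request to distinguish a delayed
 * buf done (complete the deferred acks) from a stuck CDM (re-queue the
 * request for re-apply). Otherwise all active requests are signalled
 * with error and freed.
 */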
static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t request_id = 0;
	uint64_t last_cdm_done_req = 0;
	int rc = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP, "No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			return -EINVAL;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %d",
				ctx_isp->frame_id,
				ctx_isp->sof_timestamp_val,
				ctx->ctx_id);
			goto end;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %d ctx_id: %d",
				last_cdm_done_req, ctx->ctx_id);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(ctx_isp, req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						CAM_SYNC_ISP_EVENT_BUBBLE);
					__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
				goto end;
			} else {
				CAM_WARN(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
			goto end;
		}
	}

	/*
	 * Signal all active requests with error and move all the active
	 * requests to the free list.
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
	}
end:
	/* notify reqmgr with sof signal */
	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	/*
	 * It is an idle frame without any applied request id, so send
	 * the request id as zero.
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/*
	 * Can't move the substate to SOF if we are processing a bubble,
	 * since the SOF substate can't receive REG_UPD and buf done;
	 * the processing of the bubble req would then never finish.
	 */
	if (!atomic_read(&ctx_isp->process_bubble))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %llu", req->request_id);

	return 0;
}
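/*
 * Reg update ack in bubble-applied state for RDI-only contexts: promotes
 * the waiting request to the active list (or frees it when it has no
 * output fences), reports SOF to CRM and moves the substate to EPOCH.
 */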
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
    struct cam_isp_context *ctx_isp, void *evt_data)
{
    struct cam_ctx_request *req = NULL;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_ctx_req *req_isp;
    uint64_t request_id = 0;

    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;

    /* notify reqmgr with sof signal */
    if (list_empty(&ctx->wait_req_list)) {
        CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
        goto error;
    }
    req = list_first_entry(&ctx->wait_req_list,
        struct cam_ctx_request, list);
    list_del_init(&req->list);

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;
    request_id =
        (req_isp->hw_update_data.packet_opcode_type ==
            CAM_ISP_PACKET_INIT_DEV) ? 0 : req->request_id;

    if (req_isp->num_fence_map_out != 0) {
        list_add_tail(&req->list, &ctx->active_req_list);
        ctx_isp->active_req_cnt++;
        CAM_DBG(CAM_ISP,
            "move request %lld to active list(cnt = %d)",
            req->request_id, ctx_isp->active_req_cnt);
        /* if packet has buffers, set correct request id */
        request_id = req->request_id;
    } else {
        /* no io config, so the request is completed. */
        list_add_tail(&req->list, &ctx->free_req_list);
        CAM_DBG(CAM_ISP,
            "move active req %lld to free list(cnt=%d)",
            req->request_id, ctx_isp->active_req_cnt);
    }

    __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

    if (request_id)
        ctx_isp->reported_req_id = request_id;

    __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
        CAM_REQ_MGR_SOF_EVENT_SUCCESS);
    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    __cam_isp_ctx_update_event_record(ctx_isp,
        CAM_ISP_CTX_EVENT_RUP, req);
    return 0;

error:
    /* Send SOF event as idle frame */
    __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
        CAM_REQ_MGR_SOF_EVENT_SUCCESS);
    __cam_isp_ctx_update_event_record(ctx_isp,
        CAM_ISP_CTX_EVENT_RUP, NULL);

    /*
     * There is no request in the pending list, move the sub state machine
     * to SOF sub state
     */
    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

    return 0;
}
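/*
 * IRQ dispatch table for RDI-only contexts, indexed by activated
 * substate. Judging from the handlers wired below, the per-substate
 * irq_ops slots follow the hw event order error, SOF, reg-update,
 * epoch, EOF, buf-done (inferred from this table, not restated from
 * the event enum definition).
 */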
static struct cam_isp_ctx_irq_ops
    cam_isp_ctx_rdi_only_activated_state_machine_irq
    [CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .irq_ops = {
            NULL,
            __cam_isp_ctx_rdi_only_sof_in_top_state,
            __cam_isp_ctx_reg_upd_in_sof,
            NULL,
            __cam_isp_ctx_notify_eof_in_activated_state,
            NULL,
        },
    },
    /* APPLIED */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_rdi_only_sof_in_applied_state,
            __cam_isp_ctx_reg_upd_in_applied_state,
            NULL,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_applied,
        },
    },
    /* EPOCH */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_rdi_only_sof_in_top_state,
            NULL,
            NULL,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_epoch,
        },
    },
    /* BUBBLE */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_rdi_only_sof_in_bubble_state,
            __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
            NULL,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_bubble,
        },
    },
    /* BUBBLE APPLIED ie PRE_BUBBLE */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_rdi_only_sof_in_bubble_applied,
            __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
            NULL,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_bubble_applied,
        },
    },
    /* HW ERROR */
    {
    },
    /* HALT */
    {
    },
};
static int __cam_isp_ctx_rdi_only_apply_req_top_state(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "current Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
        CAM_ISP_CTX_ACTIVATED_APPLIED);
    CAM_DBG(CAM_ISP, "new Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (rc)
        CAM_ERR_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d Apply failed in Substate[%s], rc %d",
            ctx->ctx_id,
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated), rc);

    return rc;
}
static struct cam_ctx_ops
    cam_isp_ctx_rdi_only_activated_state_machine
    [CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
        },
        .irq_ops = NULL,
    },
    /* APPLIED */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* EPOCH */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
        },
        .irq_ops = NULL,
    },
    /* PRE BUBBLE */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* BUBBLE */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HW ERROR */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HALT */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
};
static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
    struct cam_flush_dev_cmd *cmd)
{
    int rc;
    struct cam_isp_context *ctx_isp = ctx->ctx_priv;
    struct cam_req_mgr_flush_request flush_req;

    if (!ctx_isp->offline_context) {
        CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
        return -EINVAL;
    }

    flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ?
        CAM_REQ_MGR_FLUSH_TYPE_ALL : CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
    flush_req.req_id = cmd->req_id;

    CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%llu)",
        flush_req.type, flush_req.req_id);

    switch (ctx->state) {
    case CAM_CTX_ACQUIRED:
    case CAM_CTX_ACTIVATED:
        rc = __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
        break;
    case CAM_CTX_READY:
        rc = __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
        break;
    default:
        CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
        return -EINVAL;
    }

    /*
     * The workq flush below was unreachable while every switch case
     * returned directly; break out instead so a FLUSH_TYPE_ALL also
     * drains the offline apply workq.
     */
    if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
        cam_req_mgr_workq_flush(ctx_isp->workq);

    return rc;
}
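/*
 * Frees the per-request hw-update and in/out fence-map arrays that
 * __cam_isp_ctx_allocate_mem_hw_entries() allocated, and zeroes the
 * max counts so a later acquire starts clean.
 */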
static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
{
    int i;

    if (ctx->out_map_entries) {
        for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
            kfree(ctx->out_map_entries[i]);
            ctx->out_map_entries[i] = NULL;
        }

        kfree(ctx->out_map_entries);
        ctx->out_map_entries = NULL;
    }

    if (ctx->in_map_entries) {
        for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
            kfree(ctx->in_map_entries[i]);
            ctx->in_map_entries[i] = NULL;
        }

        kfree(ctx->in_map_entries);
        ctx->in_map_entries = NULL;
    }

    if (ctx->hw_update_entry) {
        for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
            kfree(ctx->hw_update_entry[i]);
            ctx->hw_update_entry[i] = NULL;
        }

        kfree(ctx->hw_update_entry);
        ctx->hw_update_entry = NULL;
    }

    ctx->max_out_map_entries = 0;
    ctx->max_in_map_entries = 0;
    ctx->max_hw_update_entries = 0;
}
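/*
 * Releases the hw context back to the hw manager, resets per-context
 * bookkeeping and event records, flushes anything still pending, and
 * drops the context to the ACQUIRED state.
 */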
static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
    void *cmd)
{
    int rc = 0;
    struct cam_hw_release_args rel_arg;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_req_mgr_flush_request flush_req;
    int i;

    if (ctx_isp->hw_ctx) {
        rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
        ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
            &rel_arg);
        ctx_isp->hw_ctx = NULL;
    } else {
        CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
    }

    ctx->last_flush_req = 0;
    ctx_isp->custom_enabled = false;
    ctx_isp->use_frame_header_ts = false;
    ctx_isp->use_default_apply = false;
    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->reported_frame_id = 0;
    ctx_isp->hw_acquired = false;
    ctx_isp->init_received = false;
    ctx_isp->support_consumed_addr = false;
    ctx_isp->aeb_enabled = false;
    ctx_isp->do_internal_recovery = false;
    ctx_isp->req_info.last_bufdone_req_id = 0;

    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    /*
     * Ideally there should never be an active request here.
     * Keep a sanity check to aid debugging.
     */
    if (!list_empty(&ctx->active_req_list))
        CAM_WARN(CAM_ISP, "Active list is not empty");

    /* Flush the pending request list */
    flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
    flush_req.link_hdl = ctx->link_hdl;
    flush_req.dev_hdl = ctx->dev_hdl;
    flush_req.req_id = 0;

    CAM_DBG(CAM_ISP, "try to flush pending list");
    spin_lock_bh(&ctx->lock);
    rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
    spin_unlock_bh(&ctx->lock);

    __cam_isp_ctx_free_mem_hw_entries(ctx);
    cam_req_mgr_workq_destroy(&ctx_isp->workq);
    ctx->state = CAM_CTX_ACQUIRED;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
        ctx->ctx_id, ctx->state);

    return rc;
}
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
    struct cam_release_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_hw_release_args rel_arg;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_req_mgr_flush_request flush_req;

    if (cmd && ctx_isp->hw_ctx) {
        CAM_ERR(CAM_ISP, "releasing hw");
        __cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
    }

    if (ctx_isp->hw_ctx) {
        rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
        ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
            &rel_arg);
        ctx_isp->hw_ctx = NULL;
    }

    cam_common_release_evt_params(ctx->dev_hdl);
    memset(&ctx_isp->evt_inject_params, 0, sizeof(struct cam_hw_inject_evt_param));

    ctx->session_hdl = -1;
    ctx->dev_hdl = -1;
    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx->last_flush_req = 0;
    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->reported_frame_id = 0;
    ctx_isp->hw_acquired = false;
    ctx_isp->init_received = false;
    ctx_isp->offline_context = false;
    ctx_isp->vfps_aux_context = false;
    ctx_isp->rdi_only_context = false;
    ctx_isp->req_info.last_bufdone_req_id = 0;
    ctx_isp->v4l2_event_sub_ids = 0;
    ctx_isp->resume_hw_in_flushed = false;

    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    /*
     * Ideally there should never be an active request here.
     * Keep a sanity check to aid debugging.
     */
    if (!list_empty(&ctx->active_req_list))
        CAM_ERR(CAM_ISP, "Active list is not empty");

    /* Flush the pending request list */
    flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
    flush_req.link_hdl = ctx->link_hdl;
    flush_req.dev_hdl = ctx->dev_hdl;
    flush_req.req_id = 0;

    CAM_DBG(CAM_ISP, "try to flush pending list");
    spin_lock_bh(&ctx->lock);
    rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
    spin_unlock_bh(&ctx->lock);

    __cam_isp_ctx_free_mem_hw_entries(ctx);
    ctx->state = CAM_CTX_AVAILABLE;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
        ctx->ctx_id, ctx->state);

    return rc;
}
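/*
 * Config (packet) path: pick a free request object, parse and validate
 * the user packet, have the hw manager prepare the update entries,
 * take refs on the output fences, then hand the request to CRM (or
 * queue it directly for offline/vfps contexts).
 */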
static int __cam_isp_ctx_config_dev_in_top_state(
    struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
    int rc = 0, i;
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp;
    struct cam_packet *packet;
    size_t remain_len = 0;
    struct cam_hw_prepare_update_args cfg = {0};
    struct cam_req_mgr_add_request add_req;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    uint32_t packet_opcode = 0;

    CAM_DBG(CAM_ISP, "get free request object");

    /* get free request */
    spin_lock_bh(&ctx->lock);
    if (!list_empty(&ctx->free_req_list)) {
        req = list_first_entry(&ctx->free_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
    }
    spin_unlock_bh(&ctx->lock);

    if (!req) {
        CAM_ERR(CAM_ISP, "No more request obj free");
        return -ENOMEM;
    }

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet);
    if (IS_ERR(packet)) {
        rc = PTR_ERR(packet);
        goto free_req;
    }

    /* Query the packet opcode */
    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
    isp_hw_cmd_args.cmd_data = (void *)packet;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_req;
    }

    packet_opcode = isp_hw_cmd_args.u.packet_op_code;
    if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
        && (packet->header.request_id <= ctx->last_flush_req)) {
        CAM_INFO(CAM_ISP,
            "request %lld has been flushed, reject packet",
            packet->header.request_id);
        rc = -EBADR;
        goto free_req;
    } else if ((packet_opcode == CAM_ISP_PACKET_INIT_DEV)
        && (packet->header.request_id <= ctx->last_flush_req)
        && ctx->last_flush_req && packet->header.request_id) {
        CAM_WARN(CAM_ISP,
            "last flushed req is %lld, config dev(init) for req %lld",
            ctx->last_flush_req, packet->header.request_id);
        rc = -EBADR;
        goto free_req;
    }

    cfg.packet = packet;
    cfg.remain_len = remain_len;
    cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
    cfg.max_hw_update_entries = ctx->max_hw_update_entries;
    cfg.hw_update_entries = req_isp->cfg;
    cfg.max_out_map_entries = ctx->max_out_map_entries;
    cfg.max_in_map_entries = ctx->max_in_map_entries;
    cfg.out_map_entries = req_isp->fence_map_out;
    cfg.in_map_entries = req_isp->fence_map_in;
    cfg.priv = &req_isp->hw_update_data;
    cfg.pf_data = &(req->pf_data);
    cfg.num_out_map_entries = 0;
    cfg.num_in_map_entries = 0;
    memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));

    rc = ctx->hw_mgr_intf->hw_prepare_update(
        ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
        rc = -EFAULT;
        goto free_req;
    }

    req_isp->num_cfg = cfg.num_hw_update_entries;
    req_isp->num_fence_map_out = cfg.num_out_map_entries;
    req_isp->num_fence_map_in = cfg.num_in_map_entries;
    req_isp->num_acked = 0;
    req_isp->num_deferred_acks = 0;
    req_isp->bubble_detected = false;
    req_isp->cdm_reset_before_apply = false;
    req_isp->hw_update_data.packet = packet;
    req->pf_data.packet_handle = cmd->packet_handle;
    req->pf_data.packet_offset = cmd->offset;
    req->pf_data.req = req;

    for (i = 0; i < req_isp->num_fence_map_out; i++) {
        rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
        if (rc) {
            CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
                req_isp->fence_map_out[i].sync_id);
            goto put_ref;
        }
    }

    CAM_DBG(CAM_ISP,
        "packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d",
        packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
        req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in);

    req->request_id = packet->header.request_id;
    req->status = 1;

    if (req_isp->hw_update_data.packet_opcode_type ==
        CAM_ISP_PACKET_INIT_DEV) {
        if (ctx->state < CAM_CTX_ACTIVATED) {
            rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
            if (rc)
                CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
            ctx_isp->init_received = true;

            if ((ctx_isp->vfps_aux_context) && (req->request_id > 0))
                ctx_isp->resume_hw_in_flushed = true;
            else
                ctx_isp->resume_hw_in_flushed = false;
        } else {
            rc = -EINVAL;
            CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
                ctx->state);
        }
    } else {
        if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
            rc = -EINVAL;
            CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
                req->request_id, ctx->state);
            goto put_ref;
        }

        if ((ctx_isp->offline_context) || (ctx_isp->vfps_aux_context)) {
            __cam_isp_ctx_enqueue_request_in_order(ctx, req, true);
        } else if (ctx->ctx_crm_intf->add_req) {
            memset(&add_req, 0, sizeof(add_req));
            add_req.link_hdl = ctx->link_hdl;
            add_req.dev_hdl = ctx->dev_hdl;
            add_req.req_id = req->request_id;
            rc = ctx->ctx_crm_intf->add_req(&add_req);
            if (rc) {
                if (rc == -EBADR)
                    CAM_INFO(CAM_ISP,
                        "Add req failed: req id=%llu, it has been flushed",
                        req->request_id);
                else
                    CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
                        req->request_id);
            } else {
                __cam_isp_ctx_enqueue_request_in_order(
                    ctx, req, true);
            }
        } else {
            CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
            rc = -ENODEV;
        }
    }
    if (rc)
        goto put_ref;

    CAM_DBG(CAM_REQ,
        "Preprocessing Config req_id %lld successful on ctx %u",
        req->request_id, ctx->ctx_id);

    if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch))
        __cam_isp_ctx_schedule_apply_req(ctx_isp);
    else if (ctx_isp->vfps_aux_context &&
        (req_isp->hw_update_data.packet_opcode_type != CAM_ISP_PACKET_INIT_DEV))
        __cam_isp_ctx_schedule_apply_req(ctx_isp);

    return rc;

put_ref:
    for (--i; i >= 0; i--) {
        if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
            CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
                req_isp->fence_map_out[i].sync_id);
    }
free_req:
    spin_lock_bh(&ctx->lock);
    list_add_tail(&req->list, &ctx->free_req_list);
    spin_unlock_bh(&ctx->lock);

    return rc;
}
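/*
 * Sizes the per-request arrays from the acquire op_params:
 * param_list[0] carries the resource count and, when SFE is enabled,
 * param_list[1] adds the SFE resources. Each of the CAM_ISP_CTX_REQ_MAX
 * request slots gets its own hw-update and in/out fence-map arrays,
 * which are wired to the free requests at the end.
 */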
static int __cam_isp_ctx_allocate_mem_hw_entries(
    struct cam_context *ctx,
    struct cam_hw_acquire_args *param)
{
    int rc = 0, i;
    uint32_t max_res = 0;
    uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
    struct cam_ctx_request *req;
    struct cam_ctx_request *temp_req;
    struct cam_isp_ctx_req *req_isp;

    if (!param->op_params.param_list[0])
        max_res = CAM_ISP_CTX_RES_MAX;
    else {
        max_res = param->op_params.param_list[0];
        if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
            max_res += param->op_params.param_list[1];
            max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
        }
    }

    ctx->max_in_map_entries = max_res;
    ctx->max_out_map_entries = max_res;
    ctx->max_hw_update_entries = max_hw_upd_entries;

    CAM_DBG(CAM_ISP,
        "Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
        max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN));

    ctx->hw_update_entry = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_update_entry *),
        GFP_KERNEL);
    if (!ctx->hw_update_entry) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory",
            ctx->dev_name, ctx->ctx_id);
        return -ENOMEM;
    }

    for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
        ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
            sizeof(struct cam_hw_update_entry), GFP_KERNEL);
        if (!ctx->hw_update_entry[i]) {
            CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry: %u",
                ctx->dev_name, ctx->ctx_id, i);
            /* free partially allocated arrays instead of leaking them */
            rc = -ENOMEM;
            goto end;
        }
    }

    ctx->in_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
        GFP_KERNEL);
    if (!ctx->in_map_entries) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries",
            ctx->dev_name, ctx->ctx_id);
        rc = -ENOMEM;
        goto end;
    }

    for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
        ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
            sizeof(struct cam_hw_fence_map_entry),
            GFP_KERNEL);
        if (!ctx->in_map_entries[i]) {
            CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries: %u",
                ctx->dev_name, ctx->ctx_id, i);
            rc = -ENOMEM;
            goto end;
        }
    }

    ctx->out_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
        GFP_KERNEL);
    if (!ctx->out_map_entries) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries",
            ctx->dev_name, ctx->ctx_id);
        rc = -ENOMEM;
        goto end;
    }

    for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
        ctx->out_map_entries[i] = kcalloc(ctx->max_out_map_entries,
            sizeof(struct cam_hw_fence_map_entry),
            GFP_KERNEL);
        if (!ctx->out_map_entries[i]) {
            CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries: %u",
                ctx->dev_name, ctx->ctx_id, i);
            rc = -ENOMEM;
            goto end;
        }
    }

    list_for_each_entry_safe(req, temp_req,
        &ctx->free_req_list, list) {
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        req_isp->cfg = ctx->hw_update_entry[req->index];
        req_isp->fence_map_in = ctx->in_map_entries[req->index];
        req_isp->fence_map_out = ctx->out_map_entries[req->index];
    }

    return rc;

end:
    __cam_isp_ctx_free_mem_hw_entries(ctx);

    return rc;
}
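/*
 * Legacy CAM_ACQUIRE_DEV path: copy the isp resource array from user
 * space, reserve hw through the hw manager, and pick the substate
 * machine matching the context type the hw manager reports. A
 * num_resources of CAM_API_COMPAT_CONSTANT means hw will be acquired
 * separately later (split acquire).
 */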
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
    struct cam_acquire_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_hw_acquire_args param;
    struct cam_isp_resource *isp_res = NULL;
    struct cam_create_dev_hdl req_hdl_param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
        cmd->session_handle, cmd->num_resources,
        cmd->handle_type, cmd->resource_hdl);

    ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed();

    if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
        ctx_isp->split_acquire = true;
        CAM_DBG(CAM_ISP, "Acquire dev handle");
        goto get_dev_handle;
    }

    if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
        CAM_ERR(CAM_ISP, "Too many resources in the acquire");
        rc = -ENOMEM;
        goto end;
    }

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    isp_res = kzalloc(
        sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
    if (!isp_res) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy %d resources from user",
        cmd->num_resources);
    if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
        sizeof(*isp_res)*cmd->num_resources)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.sec_pf_evt_cb = cam_context_dump_pf_info;
    param.num_acq = cmd->num_resources;
    param.acquire_info = (uintptr_t) isp_res;

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_res;
    }

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    /* Query whether the context has RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * This context has RDI-only resources;
         * assign the RDI-only state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx_isp->split_acquire = false;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    kfree(isp_res);
    isp_res = NULL;

get_dev_handle:
    req_hdl_param.session_hdl = cmd->session_handle;
    /* bridge is not ready for these flags, so set false for now */
    req_hdl_param.v4l2_sub_dev_flag = 0;
    req_hdl_param.media_entity_flag = 0;
    req_hdl_param.ops = ctx->crm_ctx_intf;
    req_hdl_param.priv = ctx;
    req_hdl_param.dev_id = CAM_ISP;

    CAM_DBG(CAM_ISP, "get device handle from bridge");
    ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
    if (ctx->dev_hdl <= 0) {
        rc = -EFAULT;
        CAM_ERR(CAM_ISP, "Can not create device handle");
        goto free_hw;
    }
    cmd->dev_handle = ctx->dev_hdl;

    /* store session information */
    ctx->session_hdl = cmd->session_handle;
    ctx->state = CAM_CTX_ACQUIRED;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
        cmd->session_handle, cmd->num_resources, ctx->ctx_id);

    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    if (ctx_isp->hw_acquired)
        ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
            &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(isp_res);
end:
    return rc;
}
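/*
 * CAM_ACQUIRE_HW v1: same overall flow as the legacy acquire, but the
 * resource description is a cam_isp_acquire_hw_info blob of
 * cmd->data_size bytes copied from user space.
 */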
static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
    void *args)
{
    int rc = 0;
    int i;
    struct cam_acquire_hw_cmd_v1 *cmd =
        (struct cam_acquire_hw_cmd_v1 *)args;
    struct cam_hw_acquire_args param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, hdl type %d, res %lld",
        cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    if (cmd->data_size < sizeof(*acquire_hw_info)) {
        CAM_ERR(CAM_ISP, "data_size is not a valid value");
        rc = -EINVAL;
        goto end;
    }

    acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
    if (!acquire_hw_info) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy resources from user");

    if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
        cmd->data_size)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.sec_pf_evt_cb = cam_context_dump_pf_info;
    param.num_acq = CAM_API_COMPAT_CONSTANT;
    param.acquire_info_size = cmd->data_size;
    param.acquire_info = (uint64_t) acquire_hw_info;
    param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
        &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_res;
    }

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    ctx_isp->support_consumed_addr =
        (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);

    /* Query whether the context has RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * This context has RDI-only resources;
         * assign the RDI-only state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
        ctx_isp->substate_machine = NULL;
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
        ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
    kfree(acquire_hw_info);
    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(acquire_hw_info);
end:
    return rc;
}
static void cam_req_mgr_process_workq_apply_req_worker(struct work_struct *w)
{
    cam_req_mgr_process_workq(w);
}
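/*
 * CAM_ACQUIRE_HW v2 additionally reports the acquired hw ids/paths
 * back to user space and latches the per-context feature flags from
 * op_flags (custom hw, frame-header timestamps, default apply,
 * consumed address, AEB). Offline and vfps contexts also get their
 * dedicated apply workq created here.
 */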
static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
    void *args)
{
    int rc = 0, i, j;
    struct cam_acquire_hw_cmd_v2 *cmd =
        (struct cam_acquire_hw_cmd_v2 *)args;
    struct cam_hw_acquire_args param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, hdl type %d, res %lld",
        cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    if (cmd->data_size < sizeof(*acquire_hw_info)) {
        CAM_ERR(CAM_ISP, "data_size is not a valid value");
        rc = -EINVAL;
        goto end;
    }

    acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
    if (!acquire_hw_info) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy resources from user");

    if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
        cmd->data_size)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.sec_pf_evt_cb = cam_context_dump_pf_info;
    param.num_acq = CAM_API_COMPAT_CONSTANT;
    param.acquire_info_size = cmd->data_size;
    param.acquire_info = (uint64_t) acquire_hw_info;
    param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_hw;
    }

    /*
     * Set feature flags if applicable;
     * custom hw is supported only on v2
     */
    ctx_isp->custom_enabled =
        (param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
    ctx_isp->use_frame_header_ts =
        (param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
    ctx_isp->use_default_apply =
        (param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
    ctx_isp->support_consumed_addr =
        (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
    ctx_isp->aeb_enabled =
        (param.op_flags & CAM_IFE_CTX_AEB_EN);

    if ((ctx_isp->aeb_enabled) && (!isp_ctx_debug.disable_internal_recovery))
        ctx_isp->do_internal_recovery = true;

    /* Query whether the context has RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (param.valid_acquired_hw) {
        for (i = 0; i < CAM_MAX_ACQ_RES; i++)
            cmd->hw_info.acquired_hw_id[i] =
                param.acquired_hw_id[i];

        for (i = 0; i < CAM_MAX_ACQ_RES; i++)
            for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
                cmd->hw_info.acquired_hw_path[i][j] =
                    param.acquired_hw_path[i][j];
    }
    cmd->hw_info.valid_acquired_hw =
        param.valid_acquired_hw;

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * This context has RDI-only resources;
         * assign the RDI-only state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
        ctx_isp->substate_machine = NULL;
        ctx_isp->offline_context = true;
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    if (ctx_isp->offline_context || ctx_isp->vfps_aux_context) {
        rc = cam_req_mgr_workq_create("ife_apply_req", 20,
            &ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
            cam_req_mgr_process_workq_apply_req_worker);
        if (rc)
            CAM_ERR(CAM_ISP,
                "Failed to create workq for IFE rc:%d offline: %s vfps: %s",
                rc, CAM_BOOL_TO_YESNO(ctx_isp->offline_context),
                CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context));
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
        ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
    kfree(acquire_hw_info);
    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(acquire_hw_info);
end:
    return rc;
}
static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
    void *args)
{
    int rc = -EINVAL;
    uint32_t api_version;

    if (!ctx || !args) {
        CAM_ERR(CAM_ISP, "Invalid input pointer");
        return rc;
    }

    api_version = *((uint32_t *)args);
    if (api_version == 1)
        rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
    else if (api_version == 2)
        rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
    else
        CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

    return rc;
}
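/*
 * A config received in ACQUIRED can promote the context to READY once
 * a CRM link exists (offline contexts have no link and move right
 * away).
 */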
static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
    struct cam_config_dev_cmd *cmd)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!ctx_isp->hw_acquired) {
        CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
        return -EINVAL;
    }

    rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
    if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
        ctx->state = CAM_CTX_READY;
        trace_cam_context_state("ISP", ctx);
    }

    CAM_DBG(CAM_ISP, "next state %d", ctx->state);
    return rc;
}
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
    struct cam_config_dev_cmd *cmd)
{
    int rc = 0;
    struct cam_start_stop_dev_cmd start_cmd;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!ctx_isp->hw_acquired) {
        CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
        rc = -EINVAL;
        goto end;
    }

    rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
    if (rc)
        goto end;

    if (!ctx_isp->init_received) {
        CAM_WARN(CAM_ISP,
            "Received update packet in flushed state, skip start");
        goto end;
    }

    CAM_DBG(CAM_ISP, "vfps_ctx:%s resume_hw_in_flushed:%d ctx:%d",
        CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context),
        ctx_isp->resume_hw_in_flushed,
        ctx->ctx_id);

    if (ctx_isp->vfps_aux_context) {
        /* Resume the HW only when we get the first valid req */
        if (!ctx_isp->resume_hw_in_flushed)
            goto end;
        else
            ctx_isp->resume_hw_in_flushed = false;
    }

    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
        goto end;
    }

    start_cmd.dev_handle = cmd->dev_handle;
    start_cmd.session_handle = cmd->session_handle;
    rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
    if (rc)
        CAM_ERR(CAM_ISP,
            "Failed to re-start HW after flush rc: %d", rc);
    else
        CAM_INFO(CAM_ISP,
            "Received init after flush. Re-start HW complete in ctx:%d",
            ctx->ctx_id);

end:
    CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
        ctx_isp->substate_activated);
    return rc;
}
static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *link)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!link) {
        CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u",
            link, ctx->ctx_id);
        return -EINVAL;
    }

    if (!link->crm_cb) {
        CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u",
            link->crm_cb, ctx->ctx_id);
        return -EINVAL;
    }

    CAM_DBG(CAM_ISP, "Enter.........");

    ctx->link_hdl = link->link_hdl;
    ctx->ctx_crm_intf = link->crm_cb;
    ctx_isp->subscribe_event =
        CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
    ctx_isp->trigger_id = link->trigger_id;

    /* change state only if we had the init config */
    if (ctx_isp->init_received) {
        ctx->state = CAM_CTX_READY;
        trace_cam_context_state("ISP", ctx);
    }

    CAM_DBG(CAM_ISP, "next state %d", ctx->state);

    return rc;
}
static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *unlink)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx_isp->trigger_id = -1;

    return rc;
}

static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_device_info *dev_info)
{
    int rc = 0;

    dev_info->dev_hdl = ctx->dev_hdl;
    strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
    dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
    dev_info->p_delay = 1;
    dev_info->trigger = CAM_TRIGGER_POINT_SOF;
    dev_info->trigger_on = true;

    return rc;
}
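/*
 * Clears the per-stream counters and bubble/recovery bookkeeping;
 * called from the start path so a restart does not inherit stale
 * state.
 */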
static inline void __cam_isp_context_reset_ctx_params(
    struct cam_isp_context *ctx_isp)
{
    atomic_set(&ctx_isp->process_bubble, 0);
    atomic_set(&ctx_isp->rxd_epoch, 0);
    atomic_set(&ctx_isp->internal_recovery_set, 0);
    ctx_isp->frame_id = 0;
    ctx_isp->sof_timestamp_val = 0;
    ctx_isp->boot_timestamp = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->reported_frame_id = 0;
    ctx_isp->bubble_frame_cnt = 0;
    ctx_isp->recovery_req_id = 0;
    ctx_isp->aeb_error_cnt = 0;
}
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
    struct cam_start_stop_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_isp_start_args start_isp;
    struct cam_ctx_request *req;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (cmd->session_handle != ctx->session_hdl ||
        cmd->dev_handle != ctx->dev_hdl) {
        rc = -EPERM;
        goto end;
    }

    if (list_empty(&ctx->pending_req_list)) {
        /* should never happen */
        CAM_ERR(CAM_ISP, "Start device with empty configuration");
        rc = -EFAULT;
        goto end;
    } else {
        req = list_first_entry(&ctx->pending_req_list,
            struct cam_ctx_request, list);
    }
    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    if (!ctx_isp->hw_ctx) {
        CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
        rc = -EFAULT;
        goto end;
    }

    start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
    start_isp.hw_config.request_id = req->request_id;
    start_isp.hw_config.hw_update_entries = req_isp->cfg;
    start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
    start_isp.hw_config.priv = &req_isp->hw_update_data;
    start_isp.hw_config.init_packet = 1;
    start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE;
    start_isp.hw_config.cdm_reset_before_apply = false;
    start_isp.is_internal_start = false;

    ctx_isp->last_applied_req_id = req->request_id;

    if (ctx->state == CAM_CTX_FLUSHED)
        start_isp.start_only = true;
    else
        start_isp.start_only = false;

    __cam_isp_context_reset_ctx_params(ctx_isp);

    ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
        CAM_ISP_CTX_ACTIVATED_APPLIED :
        (req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
        CAM_ISP_CTX_ACTIVATED_SOF;

    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    /*
     * In case of CSID TPG we might receive SOF and RUP IRQs
     * before hw_mgr_intf->hw_start has returned. So move
     * req out of pending list before hw_start and add it
     * back to pending list if hw_start fails.
     */
    list_del_init(&req->list);

    if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
        list_add_tail(&req->list, &ctx->free_req_list);
        atomic_set(&ctx_isp->rxd_epoch, 1);
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to free list(cnt: %d) offline ctx %u",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
    } else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
        list_add_tail(&req->list, &ctx->wait_req_list);
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to wait list(cnt: %d) ctx %u",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
    } else {
        list_add_tail(&req->list, &ctx->active_req_list);
        ctx_isp->active_req_cnt++;
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
            ctx_isp->offline_context);
    }

    /*
     * This is the only place where the state changes before calling
     * the hw: the hardware tasklet runs at higher priority, so IRQ
     * handling can come in before hw_start returns
     */
    ctx->state = CAM_CTX_ACTIVATED;

    trace_cam_context_state("ISP", ctx);
    rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
        &start_isp);
    if (rc) {
        /* HW failure; the user needs to clean up the resource */
        CAM_ERR(CAM_ISP, "Start HW failed");
        ctx->state = CAM_CTX_READY;
        if ((rc == -ETIMEDOUT) &&
            (isp_ctx_debug.enable_cdm_cmd_buff_dump))
            rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

        trace_cam_context_state("ISP", ctx);
        list_del_init(&req->list);
        list_add(&req->list, &ctx->pending_req_list);
        goto end;
    }
    CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
    return rc;
}
static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *unlink)
{
    int rc = 0;

    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx->state = CAM_CTX_ACQUIRED;
    trace_cam_context_state("ISP", ctx);

    return rc;
}
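/*
 * Stop path: halt the substate machine first so late IRQs are dropped,
 * stop the hw, notify CRM, then cancel-signal every fence on the
 * pending, wait and active lists before recycling those requests to
 * the free list.
 */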
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
    struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
    int rc = 0;
    uint32_t i;
    struct cam_hw_stop_args stop;
    struct cam_ctx_request *req;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_isp_stop_args stop_isp;

    /* Mask off all the incoming hardware events */
    spin_lock_bh(&ctx->lock);
    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
    spin_unlock_bh(&ctx->lock);

    /* stop hw first */
    if (ctx_isp->hw_ctx) {
        stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
        stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
        stop_isp.stop_only = false;
        stop_isp.is_internal_stop = false;
        stop.args = (void *) &stop_isp;
        ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
            &stop);
    }

    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (ctx->ctx_crm_intf &&
        ctx->ctx_crm_intf->notify_stop) {
        struct cam_req_mgr_notify_stop notify;

        notify.link_hdl = ctx->link_hdl;
        CAM_DBG(CAM_ISP,
            "Notify CRM about device stop ctx %u link 0x%x",
            ctx->ctx_id, ctx->link_hdl);
        ctx->ctx_crm_intf->notify_stop(&notify);
    } else if (!ctx_isp->offline_context)
        CAM_ERR(CAM_ISP, "cb not present");

    while (!list_empty(&ctx->pending_req_list)) {
        req = list_first_entry(&ctx->pending_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    while (!list_empty(&ctx->wait_req_list)) {
        req = list_first_entry(&ctx->wait_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    while (!list_empty(&ctx->active_req_list)) {
        req = list_first_entry(&ctx->active_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->reported_frame_id = 0;
    ctx_isp->last_applied_req_id = 0;
    ctx_isp->req_info.last_bufdone_req_id = 0;
    ctx_isp->bubble_frame_cnt = 0;
    atomic_set(&ctx_isp->process_bubble, 0);
    atomic_set(&ctx_isp->internal_recovery_set, 0);
    atomic_set(&ctx_isp->rxd_epoch, 0);
    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
        ctx->state, ctx->ctx_id);

    if (!stop_cmd) {
        rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
        if (rc)
            CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
    }
    return rc;
}
  6372. static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
  6373. struct cam_start_stop_dev_cmd *cmd)
  6374. {
  6375. int rc = 0;
  6376. struct cam_isp_context *ctx_isp =
  6377. (struct cam_isp_context *)ctx->ctx_priv;
  6378. __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
  6379. ctx_isp->init_received = false;
  6380. ctx->state = CAM_CTX_ACQUIRED;
  6381. trace_cam_context_state("ISP", ctx);
  6382. return rc;
  6383. }
static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);
	return rc;
}
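
/* Release-hw while activated: stop the device, then the top-state HW release. */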
static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);
	return rc;
}
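
/*
 * Send an internal PAUSE_HW command to the HW manager for this
 * context; used when CRM pauses the link.
 */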
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
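
/* Counterpart of __cam_isp_ctx_link_pause: internal RESUME_HW command. */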
static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
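
/*
 * On a CRM SOF-freeze event, turn on SOF IRQ debug in the HW manager
 * so subsequent SOFs are traced for diagnosis.
 */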
static int __cam_isp_ctx_handle_sof_freeze_evt(
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
	isp_hw_cmd_args.u.sof_irq_enable = 1;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
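
/*
 * Internal KMD recovery: halt the HW immediately, then (unless
 * skip_resume is set) resume it and restart with the first pending
 * request. ctx->lock is held only for the state checks; the HW
 * manager calls are made with the lock dropped.
 */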
static int __cam_isp_ctx_reset_and_recover(
	bool skip_resume, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_isp_start_args start_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	spin_lock_bh(&ctx->lock);
	if (ctx_isp->active_req_cnt) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP,
			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
			ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		goto end;
	}

	if (ctx->state != CAM_CTX_ACTIVATED) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
			ctx->state, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		rc = -EINVAL;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* Cannot start with no request */
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	if (!ctx_isp->hw_ctx) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Invalid hw context pointer ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	/* Block all events till HW is resumed */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	spin_unlock_bh(&ctx->lock);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	CAM_INFO(CAM_ISP,
		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);

	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
	stop_isp.stop_only = true;
	stop_isp.is_internal_stop = true;
	stop_args.args = (void *)&stop_isp;
	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
		&stop_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
			rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);
	/* The API also allows streaming off without a resume, e.g. on fatal errors */
	if (skip_resume) {
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		CAM_INFO(CAM_ISP,
			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
			ctx->ctx_id, ctx_isp->last_applied_req_id,
			ctx_isp->recovery_req_id, ctx->link_hdl);
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.start_only = true;
	start_isp.is_internal_start = true;

	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;

	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	/* IQ applied for this request, on next trigger skip IQ cfg */
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;

	/* Notify userland that KMD has done internal recovery */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY,
		0, req->request_id, ctx);

	CAM_INFO(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x for req: %llu",
		ctx->ctx_id, ctx->link_hdl, req->request_id);

end:
	return rc;
}
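
/*
 * Returns true only when a stalled bubble recovery was successfully
 * converted into a full reset-and-recover; on false the caller falls
 * back to its default handling (e.g. a register dump).
 */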
static bool __cam_isp_ctx_try_internal_recovery_for_bubble(
	int64_t error_req_id, struct cam_context *ctx)
{
	int rc;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	/* Perform recovery if bubble recovery is stalled */
	if (!atomic_read(&ctx_isp->process_bubble))
		return false;

	/* Validate if errored request has been applied */
	if (ctx_isp->last_applied_req_id < error_req_id) {
		CAM_WARN(CAM_ISP,
			"Skip trying for internal recovery last applied: %lld error_req: %lld for ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, error_req_id,
			ctx->ctx_id, ctx->link_hdl);
		return false;
	}

	if (__cam_isp_ctx_validate_for_req_reapply_util(ctx_isp)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery not possible for ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		return false;
	}

	/* Trigger reset and recover */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	rc = __cam_isp_ctx_reset_and_recover(false, ctx);
	if (rc) {
		CAM_WARN(CAM_ISP,
			"Internal recovery failed in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		goto error;
	}

	CAM_DBG(CAM_ISP,
		"Internal recovery done in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
		ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);

	return true;

error:
	return false;
}
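
/* Dispatch CRM link events (pause/resume/SOF freeze/stall/properties) per context state. */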
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if ((ctx->state == CAM_CTX_ACQUIRED) &&
		(link_evt_data->evt_type != CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES)) {
		CAM_WARN(CAM_ISP,
			"Got unexpected evt:%d in acquired state",
			link_evt_data->evt_type);
		return -EINVAL;
	}

	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
	case CAM_REQ_MGR_LINK_EVT_EOF:
		/* No handling */
		break;
	case CAM_REQ_MGR_LINK_EVT_PAUSE:
		rc = __cam_isp_ctx_link_pause(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME:
		rc = __cam_isp_ctx_link_resume(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
		rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_STALLED: {
		bool internal_recovery_skipped = false;

		if (ctx->state == CAM_CTX_ACTIVATED) {
			if (link_evt_data->try_for_recovery)
				internal_recovery_skipped =
					__cam_isp_ctx_try_internal_recovery_for_bubble(
						link_evt_data->req_id, ctx);

			if (!internal_recovery_skipped)
				rc = __cam_isp_ctx_trigger_reg_dump(
					CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
		}
		link_evt_data->try_for_recovery = internal_recovery_skipped;
	}
		break;
	case CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES:
		if (link_evt_data->u.properties_mask &
			CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF)
			ctx_isp->vfps_aux_context = true;
		else
			ctx_isp->vfps_aux_context = false;
		CAM_DBG(CAM_ISP, "vfps_aux_context:%s on ctx: %u",
			CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), ctx->ctx_id);
		break;
	default:
		CAM_WARN(CAM_ISP,
			"Unsupported event type: 0x%x on ctx: %u",
			link_evt_data->evt_type, ctx->ctx_id);
		rc = -EINVAL;
		break;
	}

	return rc;
}
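
/*
 * An unlink arriving while streaming is unexpected: stop the device,
 * then run the normal unlink path.
 */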
static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_ISP,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
	return rc;
}
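
/*
 * CRM apply entry point: forwards the apply to the handler of the
 * current activated substate, warning (rate-limited) if that substate
 * has no apply hook.
 */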
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl);
	CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), apply->request_id);
	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.apply_req) {
		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply failed in active Substate[%s] rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);
	return rc;
}
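
/*
 * Apply "default settings" (frame skip) when no new request is ready;
 * this is also the hook through which a pending internal recovery is
 * kicked off at SOF.
 */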
static int __cam_isp_ctx_apply_default_settings(
	struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if ((!ctx_isp->use_default_apply) && !(atomic_read(&ctx_isp->internal_recovery_set)))
		return 0;

	if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
		CAM_WARN(CAM_ISP,
			"Trigger: %u not subscribed for: %u",
			apply->trigger_point, ctx_isp->subscribe_event);
		return 0;
	}

	/* Allow apply default settings for IFE only at SOF */
	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
		return 0;

	if (atomic_read(&ctx_isp->internal_recovery_set))
		return __cam_isp_ctx_reset_and_recover(false, ctx);

	CAM_DBG(CAM_ISP,
		"Enter: apply req in Substate:%d request_id:%lld ctx:%u on link:0x%x",
		ctx_isp->substate_activated, apply->request_id,
		ctx->ctx_id, ctx->link_hdl);

	ctx_ops = &ctx_isp->substate_machine[
		ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.notify_frame_skip) {
		rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated substate %d",
			ctx_isp->substate_activated);
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply default failed in active substate %d rc %d",
			ctx_isp->substate_activated, rc);
	return rc;
}
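
/*
 * Top-level IRQ handler in the Activated state: takes ctx->lock and
 * dispatches the event to the IRQ table of the current substate.
 */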
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
		__cam_isp_ctx_get_event_ts(evt_id, evt_data));

	CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d, ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), evt_id,
		ctx->ctx_id);
	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
	if (irq_ops->irq_ops[evt_id]) {
		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CAM_DBG(CAM_ISP,
			"No handle function for Substate[%s], evt id %d, ctx:%d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), evt_id,
			ctx->ctx_id);
		if (isp_ctx_debug.enable_state_monitor_dump)
			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
	}

	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), ctx->ctx_id);

	spin_unlock(&ctx->lock);
	return rc;
}
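
/*
 * Validate an event-notify injection before it is latched: only a
 * known subset of error types is accepted, and PF injection requires
 * the SMMU context bank to have non-fatal faults enabled.
 */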
static int cam_isp_context_validate_event_notify_injection(struct cam_context *ctx,
	struct cam_hw_inject_evt_param *evt_params)
{
	int rc = 0;
	uint32_t evt_type;
	uint64_t req_id;

	req_id = evt_params->req_id;
	evt_type = evt_params->u.evt_notify.evt_notify_type;

	switch (evt_type) {
	case V4L_EVENT_CAM_REQ_MGR_ERROR: {
		struct cam_hw_inject_err_evt_param *err_evt_params =
			&evt_params->u.evt_notify.u.err_evt_params;

		switch (err_evt_params->err_type) {
		case CAM_REQ_MGR_ERROR_TYPE_RECOVERY:
		case CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE:
		case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY:
		case CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY:
			break;
		default:
			CAM_ERR(CAM_ISP,
				"Invalid error type: %u for error event injection err code: %u req id: %llu ctx id: %u dev hdl: %d",
				err_evt_params->err_type, err_evt_params->err_code,
				req_id, ctx->ctx_id, ctx->dev_hdl);
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject ERR evt: err code: %u err type: %u req id: %llu ctx id: %u dev hdl: %d",
			err_evt_params->err_code, err_evt_params->err_type,
			req_id, ctx->ctx_id, ctx->dev_hdl);
		break;
	}
	case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: {
		struct cam_hw_inject_pf_evt_param *pf_evt_params =
			&evt_params->u.evt_notify.u.pf_evt_params;
		bool non_fatal_en;

		rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Fail to query whether device's cb has non-fatal enabled rc:%d",
				rc);
			return rc;
		}

		if (!non_fatal_en) {
			CAM_ERR(CAM_ISP,
				"Fail to inject page fault event notification. Page fault is fatal for ISP");
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject PF evt: req_id: %llu ctx id: %u dev hdl: %d ctx found: %hhu",
			req_id, ctx->ctx_id, ctx->dev_hdl, pf_evt_params->ctx_found);
		break;
	}
	default:
		CAM_ERR(CAM_ISP, "Event notification type not supported: %u", evt_type);
		rc = -EINVAL;
	}

	return rc;
}
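
/*
 * Entry point for event injection: validates the request, then caches
 * it in evt_inject_params with is_valid set, marking a pending
 * injection for later consumption by this context.
 */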
static int cam_isp_context_inject_evt(void *context, void *evt_args)
{
	struct cam_context *ctx = context;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_inject_evt_param *evt_params = evt_args;
	int rc = 0;

	if (!ctx || !evt_args) {
		CAM_ERR(CAM_ISP,
			"Invalid params ctx %s event args %s",
			CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args));
		return -EINVAL;
	}

	ctx_isp = ctx->ctx_priv;

	if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE) {
		rc = cam_isp_context_validate_event_notify_injection(ctx, evt_params);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Event notification injection failed validation rc: %d", rc);
			return rc;
		}
	} else {
		CAM_ERR(CAM_ISP, "Buffer done err injection %u not supported by ISP",
			evt_params->inject_id);
		return -EINVAL;
	}

	memcpy(&ctx_isp->evt_inject_params, evt_params,
		sizeof(struct cam_hw_inject_evt_param));

	ctx_isp->evt_inject_params.is_valid = true;

	return rc;
}
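
/*
 * Per-state ops table indexed by ctx->state (CAM_CTX_STATE_MAX
 * entries); the generic cam_context layer dispatches ioctl, CRM and
 * IRQ callbacks through whichever entry matches the current state.
 */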
/* top state machine */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_flushed_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_activated,
			.apply_req = __cam_isp_ctx_apply_req,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_settings,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.process_evt = __cam_isp_ctx_process_evt,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.recovery_ops = cam_isp_context_hw_recovery,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
};
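
/*
 * Forwarded to the HW manager's recovery handler when one is
 * registered; -EPERM otherwise.
 */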
static int cam_isp_context_hw_recovery(void *priv, void *data)
{
	struct cam_context *ctx = priv;
	int rc = -EPERM;

	if (ctx->hw_mgr_intf->hw_recovery)
		rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data);
	else
		CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery");

	return rc;
}
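
/*
 * Walk one request list and dump PF info for each request until the
 * faulted buffer is located; *found tells the caller whether to stop
 * searching the remaining lists.
 */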
static void cam_isp_context_find_faulted_context(struct cam_context *ctx,
	struct list_head *req_list, struct cam_hw_dump_pf_args *pf_args, bool *found)
{
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	int rc;

	*found = false;
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		CAM_INFO(CAM_ISP, "List req_id: %llu ctx id: %u",
			req->request_id, ctx->ctx_id);
		rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");
		/*
		 * Even if the faulted ctx was already identified, keep
		 * iterating until the faulted buffer itself is found;
		 * stop only once it is.
		 */
		if (pf_args->pf_context_info.mem_type != CAM_FAULT_BUF_NOT_FOUND) {
			*found = true;
			break;
		}
	}
}
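
/*
 * Pagefault handler: search the active, wait and pending request lists
 * for the faulted buffer, log the faulted resource, and notify UMD
 * when required.
 */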
static int cam_isp_context_dump_requests(void *data, void *args)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args;
	int rc = 0;
	bool found;

	if (!ctx || !pf_args) {
		CAM_ERR(CAM_ISP, "Invalid ctx %pK or pf args %pK",
			ctx, pf_args);
		return -EINVAL;
	}

	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid isp ctx");
		return -EINVAL;
	}

	if (pf_args->handle_sec_pf)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over active list for isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->active_req_list,
		pf_args, &found);
	if (found)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over waiting list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->wait_req_list,
		pf_args, &found);
	if (found)
		goto end;

	/*
	 * In certain scenarios we observe both overflow and SMMU pagefault
	 * for a particular request. If overflow is handled before page fault
	 * we need to traverse through pending request list because if
	 * bubble recovery is enabled on any request we move that request
	 * and all the subsequent requests to the pending list while handling
	 * overflow error.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->pending_req_list,
		pf_args, &found);

end:
	if (pf_args->pf_context_info.resource_type) {
		ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
		CAM_INFO(CAM_ISP,
			"Page fault on resource:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
			__cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type,
			pf_args->pf_context_info.resource_type),
			pf_args->pf_context_info.resource_type, ctx->ctx_id, ctx_isp->frame_id,
			ctx_isp->reported_req_id, ctx_isp->last_applied_req_id);
	}

	/*
	 * Send PF notification to UMD if the PF was found on this ctx, or
	 * if notification is forced even when no faulted ctx was found
	 */
	if (pf_args->pf_context_info.ctx_found ||
		pf_args->pf_context_info.force_send_pf_evt)
		rc = cam_context_send_pf_evt(ctx, pf_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify PF event to userspace rc: %d", rc);

	return rc;
}
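
/*
 * One-time debugfs setup. A minimal usage sketch, assuming the default
 * camera debugfs root (the mount point and root directory name may
 * differ by target):
 *   echo 1 > /sys/kernel/debug/camera/isp_ctx/enable_state_monitor_dump
 */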
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("isp_ctx", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		return rc;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);

	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);

	debugfs_create_bool("disable_internal_recovery", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery);

	return 0;
}
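
/*
 * Bind an ISP context to its base cam_context: seed the ISP-private
 * state, wire the per-request private data, register the top state
 * machine, and create the debugfs entries once per driver.
 */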
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -1;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();
err:
	return rc;
}
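
/*
 * Tear down the base context and scrub the ISP context; warns if the
 * context is not back in the SOF substate.
 */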
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}