// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"
#include "cam_ife_hw_mgr.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
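
/*
 * Advance a circular-buffer head atomically; the wrapped slot index is
 * returned through 'ret'.
 */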
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
		max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	void *pf_args);

static int cam_isp_context_hw_recovery(void *priv, void *data);

static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);

static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp);

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id);

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type);

static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state);

static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}

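/*
 * Record an event (SUBMIT/APPLY/EPOCH/RUP/BUFDONE) in the per-context
 * circular event-record buffer along with its request id and timestamp.
 * SUBMIT and APPLY must carry a valid request; frame events may not.
 */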
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}

	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}

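/*
 * On a SOF freeze, request the ISP hw mgr to enable SOF IRQ debug so
 * subsequent SOF interrupts are traced for diagnosis.
 */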
static int __cam_isp_ctx_handle_sof_freeze_evt(
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
	isp_hw_cmd_args.u.sof_irq_enable = 1;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	ctx_isp->sof_dbg_irq_en = true;

	return rc;
}

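/*
 * Dump-helper callback: serialize one event record as
 * <req_id, seconds, microseconds> words into the dump buffer.
 */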
static void *cam_isp_ctx_user_dump_events(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_isp_context_event_record *record;
	struct timespec64 ts;

	record = (struct cam_isp_context_event_record *)dump_struct;
	addr = (uint64_t *)addr_ptr;
	ts = ktime_to_timespec64(record->timestamp);

	*addr++ = record->req_id;
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;

	return addr;
}

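/*
 * Print the recorded event history for each event type to the kernel
 * log, oldest entry first, as "req_id[sec:usec]" tuples.
 */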
static int __cam_isp_ctx_print_event_record(struct cam_isp_context *ctx_isp)
{
	int i, j, rc = 0;
	int index;
	uint32_t oldest_entry, num_entries;
	uint64_t state_head;
	struct cam_isp_context_event_record *record;
	uint32_t len;
	uint8_t buf[CAM_ISP_CONTEXT_DBG_BUF_LEN];
	struct timespec64 ts;
	struct cam_context *ctx = ctx_isp->base;

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}

		index = oldest_entry;
		len = 0;
		memset(buf, 0, CAM_ISP_CONTEXT_DBG_BUF_LEN);
		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];
			ts = ktime_to_timespec64(record->timestamp);
			len += scnprintf(buf + len, CAM_ISP_CONTEXT_DBG_BUF_LEN - len,
				"%llu[%lld:%06lld] ", record->req_id, ts.tv_sec,
				ts.tv_nsec / NSEC_PER_USEC);
			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
		if (len)
			CAM_INFO(CAM_ISP, "Ctx:%d %s: %s",
				ctx->ctx_id, __cam_isp_evt_val_to_type(i), buf);
	}
	return rc;
}

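/*
 * Serialize the event-record history into a user-provided dump buffer,
 * validating the remaining buffer space before each event type.
 */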
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, j, rc = 0;
	int index;
	size_t remain_len;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t state_head;
	struct cam_isp_context_event_record *record;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}

		index = oldest_entry;

		if (dump_args->buf_len <= dump_args->offset) {
			CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
				dump_args->buf_len, dump_args->offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
				sizeof(uint64_t));
		remain_len = dump_args->buf_len - dump_args->offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}

		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];
			rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events,
				record, sizeof(uint64_t), "ISP_EVT_%s:",
				__cam_isp_evt_val_to_type(i));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"CAM_ISP_CONTEXT DUMP_EVENT_RECORD: Dump failed, rc: %d",
					rc);
				return rc;
			}
			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
	}
	return rc;
}

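/*
 * Mini-dump one request: copy its ack/bubble state, fence maps and IO
 * configs into the dump region, bailing out early when the remaining
 * space cannot hold the next chunk; *bytes_updated reports consumption.
 */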
static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
	uint8_t *start_addr, uint8_t *end_addr,
	unsigned long *bytes_updated)
{
	struct cam_isp_ctx_req_mini_dump *req_md;
	struct cam_buf_io_cfg *io_cfg;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet = NULL;
	unsigned long bytes_required = 0;

	bytes_required = sizeof(*req_md);
	*bytes_updated = 0;
	if (start_addr + bytes_required > end_addr)
		return;

	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_md->num_acked = req_isp->num_acked;
	req_md->num_deferred_acks = req_isp->num_deferred_acks;
	req_md->bubble_report = req_isp->bubble_report;
	req_md->bubble_detected = req_isp->bubble_detected;
	req_md->reapply_type = req_isp->reapply_type;
	req_md->request_id = req->request_id;
	*bytes_updated += bytes_required;

	if (req_isp->num_fence_map_out) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_out;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_out = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
		req_md->num_fence_map_out = req_isp->num_fence_map_out;
		*bytes_updated += bytes_required;
	}

	if (req_isp->num_fence_map_in) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_in;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_in = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
		req_md->num_fence_map_in = req_isp->num_fence_map_in;
		*bytes_updated += bytes_required;
	}

	packet = req_isp->hw_update_data.packet;
	if (packet && packet->num_io_configs) {
		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
			packet->io_configs_offset / 4);
		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->io_cfg, io_cfg, bytes_required);
		*bytes_updated += bytes_required;
		req_md->num_io_cfg = packet->num_io_configs;
	}
}

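/*
 * Mini-dump callback for the context: snapshot timestamps, counters and
 * flags, then append a mini-dump of each request on the active, wait
 * and pending lists until the dump region is exhausted.
 */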
static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
{
	struct cam_isp_ctx_mini_dump_info *md;
	struct cam_isp_context *ctx_isp;
	struct cam_context *ctx;
	struct cam_ctx_request *req, *req_temp;
	struct cam_hw_mini_dump_args *dump_args;
	uint8_t *start_addr;
	uint8_t *end_addr;
	unsigned long total_bytes = 0;
	unsigned long bytes_updated = 0;
	uint32_t i;

	if (!priv || !args) {
		CAM_ERR(CAM_ISP, "invalid params");
		return 0;
	}

	dump_args = (struct cam_hw_mini_dump_args *)args;
	if (dump_args->len < sizeof(*md)) {
		CAM_ERR(CAM_ISP,
			"Insufficient size received %lu required size: %zu",
			dump_args->len, sizeof(*md));
		return 0;
	}

	ctx = (struct cam_context *)priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	start_addr = (uint8_t *)dump_args->start_addr;
	end_addr = start_addr + dump_args->len;
	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;

	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
	md->boot_timestamp = ctx_isp->boot_timestamp;
	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
	md->init_timestamp = ctx_isp->init_timestamp;
	md->frame_id = ctx_isp->frame_id;
	md->reported_req_id = ctx_isp->reported_req_id;
	md->last_applied_req_id = ctx_isp->last_applied_req_id;
	md->last_bufdone_err_apply_req_id =
		ctx_isp->last_bufdone_err_apply_req_id;
	md->frame_id_meta = ctx_isp->frame_id_meta;
	md->substate_activated = ctx_isp->substate_activated;
	md->ctx_id = ctx->ctx_id;
	md->subscribe_event = ctx_isp->subscribe_event;
	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
	md->isp_device_type = ctx_isp->isp_device_type;
	md->active_req_cnt = ctx_isp->active_req_cnt;
	md->trigger_id = ctx_isp->trigger_id;
	md->rdi_only_context = ctx_isp->rdi_only_context;
	md->offline_context = ctx_isp->offline_context;
	md->hw_acquired = ctx_isp->hw_acquired;
	md->init_received = ctx_isp->init_received;
	md->split_acquire = ctx_isp->split_acquire;
	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
	md->support_consumed_addr = ctx_isp->support_consumed_addr;
	md->use_default_apply = ctx_isp->use_default_apply;
	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		memcpy(md->event_record[i], ctx_isp->event_record[i],
			sizeof(struct cam_isp_context_event_record) *
			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
	}

	total_bytes += sizeof(*md);
	if (start_addr + total_bytes >= end_addr)
		goto end;

	if (!list_empty(&ctx->active_req_list)) {
		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->active_list[md->active_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->wait_req_list)) {
		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->wait_list[md->wait_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->pending_req_list)) {
		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->pending_list[md->pending_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

end:
	dump_args->bytes_written = total_bytes;
	return 0;
}

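/*
 * Append the current substate, frame id, request id and trigger to the
 * circular state-monitor buffer, timestamped in ms since context init.
 */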
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
		return "SEC_EVT_SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
		return "SEC_EVT_EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
		return "OUT_OF_SYNC_FRAME_DROP";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}

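/*
 * Print the state-monitor history to the kernel log, oldest entry
 * first; invoked on error paths to trace preceding requests.
 */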
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}

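/*
 * Dump-helper callback: serialize one state-monitor entry as
 * <evt_time_stamp, frame_id, req_id> words.
 */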
static void *cam_isp_ctx_user_dump_state_monitor_array_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_isp_context_state_monitor *evt = NULL;
	uint64_t *addr;

	evt = (struct cam_isp_context_state_monitor *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = evt->evt_time_stamp;
	*addr++ = evt->frame_id;
	*addr++ = evt->req_id;

	return addr;
}

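/*
 * Serialize the state-monitor history into a user-provided dump buffer
 * via cam_common_user_dump_helper(), oldest entry first.
 */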
static int __cam_isp_ctx_user_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	int index;
	uint32_t oldest_entry;
	uint32_t num_entries;
	uint64_t state_head;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return 0;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		rc = cam_common_user_dump_helper(dump_args,
			cam_isp_ctx_user_dump_state_monitor_array_info,
			&ctx_isp->cam_isp_ctx_state_monitor[index],
			sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			__cam_isp_hw_evt_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
		if (rc) {
			CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: Event record dump failed, rc: %d", rc);
			return rc;
		}

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
	return rc;
}

static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}

static const char *__cam_isp_ctx_crm_trigger_point_to_string(
	int trigger_point)
{
	switch (trigger_point) {
	case CAM_TRIGGER_POINT_SOF:
		return "SOF";
	case CAM_TRIGGER_POINT_EOF:
		return "EOF";
	default:
		return "Invalid";
	}
}

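/*
 * Notify CRM of a SOF/EOF trigger on this link, unless the trigger type
 * is not subscribed or internal recovery is in progress.
 */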
static int __cam_isp_ctx_notify_trigger_util(
	int trigger_type, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_trigger_notify notify;

	/* Trigger type not supported, return */
	if (!(ctx_isp->subscribe_event & trigger_type)) {
		CAM_DBG(CAM_ISP,
			"%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id);
		return 0;
	}

	/* Skip CRM notify when recovery is in progress */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_DBG(CAM_ISP,
			"Internal recovery in progress skip notifying %s trigger point in ctx: %u on link: 0x%x",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx->ctx_id, ctx->link_hdl);
		return 0;
	}

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.frame_id = ctx_isp->frame_id;
	notify.trigger = trigger_type;
	notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
	notify.trigger_id = ctx_isp->trigger_id;

	CAM_DBG(CAM_ISP,
		"Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld",
		__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
		ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
		ctx_isp->req_info.last_bufdone_req_id);

	rc = ctx->ctx_crm_intf->notify_trigger(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id, rc);

	return rc;
}

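/*
 * Send a v4l2 error event (type and code) for the given request to
 * userspace via the request manager message interface.
 */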
  636. static int __cam_isp_ctx_notify_v4l2_error_event(
  637. uint32_t error_type, uint32_t error_code,
  638. uint64_t error_request_id, struct cam_context *ctx)
  639. {
  640. int rc = 0;
  641. struct cam_req_mgr_message req_msg;
  642. req_msg.session_hdl = ctx->session_hdl;
  643. req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
  644. req_msg.u.err_msg.error_type = error_type;
  645. req_msg.u.err_msg.link_hdl = ctx->link_hdl;
  646. req_msg.u.err_msg.request_id = error_request_id;
  647. req_msg.u.err_msg.resource_size = 0x0;
  648. req_msg.u.err_msg.error_code = error_code;
  649. CAM_DBG(CAM_ISP,
  650. "v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x notified successfully",
  651. error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);
  652. rc = cam_req_mgr_notify_message(&req_msg,
  653. V4L_EVENT_CAM_REQ_MGR_ERROR,
  654. V4L_EVENT_CAM_REQ_MGR_EVENT);
  655. if (rc)
  656. CAM_ERR(CAM_ISP,
  657. "Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x",
658. error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);
  659. return rc;
  660. }
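/*
 * __cam_isp_ctx_notify_error_util()
 *
 * Notify CRM of a device error on this link. Bubble errors are logged as
 * warnings since they are recoverable; all other errors are treated as
 * fatal and logged as errors.
 */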
  661. static int __cam_isp_ctx_notify_error_util(
  662. uint32_t trigger_type, enum cam_req_mgr_device_error error,
  663. uint64_t req_id, struct cam_isp_context *ctx_isp)
  664. {
  665. int rc = -EINVAL;
  666. struct cam_context *ctx = ctx_isp->base;
  667. struct cam_req_mgr_error_notify notify;
  668. notify.link_hdl = ctx->link_hdl;
  669. notify.dev_hdl = ctx->dev_hdl;
  670. notify.req_id = req_id;
  671. notify.error = error;
  672. notify.trigger = trigger_type;
  673. notify.frame_id = ctx_isp->frame_id;
  674. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  675. if (error == CRM_KMD_ERR_BUBBLE)
  676. CAM_WARN(CAM_ISP,
  677. "Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x",
  678. req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
  679. else
  680. CAM_ERR(CAM_ISP,
  681. "Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x",
  682. error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
  683. rc = ctx->ctx_crm_intf->notify_err(&notify);
  684. if (rc)
  685. CAM_ERR(CAM_ISP,
  686. "Failed to notify error: %u for req: %lu on ctx: %u in link: 0x%x",
  687. error, req_id, ctx->ctx_id, ctx->link_hdl);
  688. return rc;
  689. }
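/*
 * __cam_isp_ctx_trigger_reg_dump()
 *
 * Issue the given reg-dump command to the HW manager for this context,
 * e.g. to capture register state on an error.
 */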
  690. static int __cam_isp_ctx_trigger_reg_dump(
  691. enum cam_hw_mgr_command cmd,
  692. struct cam_context *ctx)
  693. {
  694. int rc = 0;
  695. struct cam_hw_cmd_args hw_cmd_args;
  696. hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
  697. hw_cmd_args.cmd_type = cmd;
  698. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
  699. &hw_cmd_args);
  700. if (rc) {
  701. CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d",
  702. ctx->ctx_id, ctx->link_hdl, rc);
  703. goto end;
  704. }
  705. CAM_DBG(CAM_ISP,
  706. "Reg dump type: %u successful in ctx: %u on link: 0x%x",
  707. cmd, ctx->ctx_id, ctx->link_hdl);
  708. end:
  709. return rc;
  710. }
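/*
 * __cam_isp_ctx_pause_crm_timer()
 *
 * Ask CRM to pause the SOF watchdog timer for this link (timer.state is
 * set to false).
 */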
  711. static int __cam_isp_ctx_pause_crm_timer(
  712. struct cam_context *ctx)
  713. {
  714. int rc = -EINVAL;
  715. struct cam_req_mgr_timer_notify timer;
  716. if (!ctx || !ctx->ctx_crm_intf)
  717. goto end;
  718. timer.link_hdl = ctx->link_hdl;
  719. timer.dev_hdl = ctx->dev_hdl;
  720. timer.state = false;
  721. rc = ctx->ctx_crm_intf->notify_timer(&timer);
  722. if (rc) {
  723. CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x",
  724. ctx->ctx_id, ctx->link_hdl);
  725. goto end;
  726. }
  727. CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success",
  728. ctx->ctx_id, ctx->link_hdl);
  729. end:
  730. return rc;
  731. }
  732. static inline void __cam_isp_ctx_update_sof_ts_util(
  733. struct cam_isp_hw_sof_event_data *sof_event_data,
  734. struct cam_isp_context *ctx_isp)
  735. {
  736. /* Delayed update, skip if ts is already updated */
  737. if (ctx_isp->sof_timestamp_val == sof_event_data->timestamp)
  738. return;
  739. ctx_isp->frame_id++;
  740. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  741. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  742. }
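/*
 * cam_isp_ctx_dump_req()
 *
 * Dump all command buffers of a request after validating each buffer's
 * offset and length. When dump_to_buff is set, the contents are copied
 * into the caller-provided buffer via the CDM dump helper; otherwise they
 * are dumped to the log.
 */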
  743. static int cam_isp_ctx_dump_req(
  744. struct cam_isp_ctx_req *req_isp,
  745. uintptr_t cpu_addr,
  746. size_t buf_len,
  747. size_t *offset,
  748. bool dump_to_buff)
  749. {
  750. int i, rc = 0;
  751. size_t len = 0;
  752. uint32_t *buf_addr;
  753. uint32_t *buf_start, *buf_end;
  754. size_t remain_len = 0;
  755. struct cam_cdm_cmd_buf_dump_info dump_info;
  756. for (i = 0; i < req_isp->num_cfg; i++) {
  757. rc = cam_packet_util_get_cmd_mem_addr(
  758. req_isp->cfg[i].handle, &buf_addr, &len);
  759. if (rc) {
  760. CAM_ERR_RATE_LIMIT(CAM_ISP,
  761. "Failed to get_cmd_mem_addr, rc=%d",
  762. rc);
  763. } else {
  764. if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
  765. CAM_ERR(CAM_ISP,
  766. "Invalid offset exp %u actual %u",
  767. req_isp->cfg[i].offset, (uint32_t)len);
  768. return -EINVAL;
  769. }
  770. remain_len = len - req_isp->cfg[i].offset;
  771. if (req_isp->cfg[i].len >
  772. ((uint32_t)remain_len)) {
  773. CAM_ERR(CAM_ISP,
  774. "Invalid len exp %u remain_len %u",
  775. req_isp->cfg[i].len,
  776. (uint32_t)remain_len);
  777. return -EINVAL;
  778. }
  779. buf_start = (uint32_t *)((uint8_t *) buf_addr +
  780. req_isp->cfg[i].offset);
  781. buf_end = (uint32_t *)((uint8_t *) buf_start +
  782. req_isp->cfg[i].len - 1);
  783. if (dump_to_buff) {
  784. if (!cpu_addr || !offset || !buf_len) {
  785. CAM_ERR(CAM_ISP, "Invalid args");
  786. break;
  787. }
  788. dump_info.src_start = buf_start;
  789. dump_info.src_end = buf_end;
  790. dump_info.dst_start = cpu_addr;
  791. dump_info.dst_offset = *offset;
  792. dump_info.dst_max_size = buf_len;
  793. rc = cam_cdm_util_dump_cmd_bufs_v2(
  794. &dump_info);
  795. *offset = dump_info.dst_offset;
  796. if (rc)
  797. return rc;
  798. } else
  799. cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
  800. }
  801. }
  802. return rc;
  803. }
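/*
 * __cam_isp_ctx_enqueue_request_in_order()
 *
 * Insert a request into the pending list sorted by request_id. Entries
 * with a higher id are temporarily moved aside, the new request is
 * appended, and the moved entries are re-appended, keeping the list
 * ordered. Warns if a duplicate request id is seen.
 */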
  804. static int __cam_isp_ctx_enqueue_request_in_order(
  805. struct cam_context *ctx, struct cam_ctx_request *req, bool lock)
  806. {
  807. struct cam_ctx_request *req_current;
  808. struct cam_ctx_request *req_prev;
  809. struct list_head temp_list;
  810. struct cam_isp_context *ctx_isp;
  811. INIT_LIST_HEAD(&temp_list);
  812. if (lock)
  813. spin_lock_bh(&ctx->lock);
  814. if (list_empty(&ctx->pending_req_list)) {
  815. list_add_tail(&req->list, &ctx->pending_req_list);
  816. } else {
  817. list_for_each_entry_safe_reverse(
  818. req_current, req_prev, &ctx->pending_req_list, list) {
  819. if (req->request_id < req_current->request_id) {
  820. list_del_init(&req_current->list);
  821. list_add(&req_current->list, &temp_list);
  822. continue;
  823. } else if (req->request_id == req_current->request_id) {
  824. CAM_WARN(CAM_ISP,
  825. "Received duplicated request %lld",
  826. req->request_id);
  827. }
  828. break;
  829. }
  830. list_add_tail(&req->list, &ctx->pending_req_list);
  831. if (!list_empty(&temp_list)) {
  832. list_for_each_entry_safe(
  833. req_current, req_prev, &temp_list, list) {
  834. list_del_init(&req_current->list);
  835. list_add_tail(&req_current->list,
  836. &ctx->pending_req_list);
  837. }
  838. }
  839. }
  840. ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
  841. __cam_isp_ctx_update_event_record(ctx_isp,
  842. CAM_ISP_CTX_EVENT_SUBMIT, req);
  843. if (lock)
  844. spin_unlock_bh(&ctx->lock);
  845. return 0;
  846. }
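/*
 * __cam_isp_ctx_enqueue_init_request()
 *
 * Merge an incoming INIT packet into an INIT request already at the head
 * of the pending list: fence maps, command buffers, reg-dump descriptors
 * and MUP settings are folded into the old request, and the new request
 * is returned to the free list. Fails if the merged command buffers
 * exceed max_hw_update_entries or if an update packet arrived first.
 */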
  847. static int __cam_isp_ctx_enqueue_init_request(
  848. struct cam_context *ctx, struct cam_ctx_request *req)
  849. {
  850. int rc = 0;
  851. struct cam_ctx_request *req_old;
  852. struct cam_isp_ctx_req *req_isp_old;
  853. struct cam_isp_ctx_req *req_isp_new;
  854. struct cam_isp_prepare_hw_update_data *req_update_old;
  855. struct cam_isp_prepare_hw_update_data *req_update_new;
  856. struct cam_isp_prepare_hw_update_data *hw_update_data;
  857. spin_lock_bh(&ctx->lock);
  858. if (list_empty(&ctx->pending_req_list)) {
  859. list_add_tail(&req->list, &ctx->pending_req_list);
860. CAM_DBG(CAM_ISP, "INIT packet added req id = %lld",
  861. req->request_id);
  862. goto end;
  863. }
  864. req_old = list_first_entry(&ctx->pending_req_list,
  865. struct cam_ctx_request, list);
  866. req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
  867. req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
  868. if (req_isp_old->hw_update_data.packet_opcode_type ==
  869. CAM_ISP_PACKET_INIT_DEV) {
  870. if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
  871. ctx->max_hw_update_entries) {
  872. CAM_WARN(CAM_ISP,
  873. "Can not merge INIT pkt num_cfgs = %d",
  874. (req_isp_old->num_cfg +
  875. req_isp_new->num_cfg));
  876. rc = -ENOMEM;
  877. }
  878. if (req_isp_old->num_fence_map_out != 0 ||
  879. req_isp_old->num_fence_map_in != 0) {
  880. CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
  881. rc = -EINVAL;
  882. }
  883. if (!rc) {
  884. memcpy(req_isp_old->fence_map_out,
  885. req_isp_new->fence_map_out,
  886. sizeof(req_isp_new->fence_map_out[0])*
  887. req_isp_new->num_fence_map_out);
  888. req_isp_old->num_fence_map_out =
  889. req_isp_new->num_fence_map_out;
  890. memcpy(req_isp_old->fence_map_in,
  891. req_isp_new->fence_map_in,
  892. sizeof(req_isp_new->fence_map_in[0])*
  893. req_isp_new->num_fence_map_in);
  894. req_isp_old->num_fence_map_in =
  895. req_isp_new->num_fence_map_in;
  896. memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
  897. req_isp_new->cfg,
  898. sizeof(req_isp_new->cfg[0]) *
  899. req_isp_new->num_cfg);
  900. req_isp_old->num_cfg += req_isp_new->num_cfg;
  901. memcpy(&req_old->pf_data, &req->pf_data,
  902. sizeof(struct cam_hw_mgr_pf_request_info));
  903. if (req_isp_new->hw_update_data.num_reg_dump_buf) {
  904. req_update_new = &req_isp_new->hw_update_data;
  905. req_update_old = &req_isp_old->hw_update_data;
  906. memcpy(&req_update_old->reg_dump_buf_desc,
  907. &req_update_new->reg_dump_buf_desc,
  908. sizeof(struct cam_cmd_buf_desc) *
  909. req_update_new->num_reg_dump_buf);
  910. req_update_old->num_reg_dump_buf =
  911. req_update_new->num_reg_dump_buf;
  912. }
  913. /* Update HW update params for ePCR */
  914. hw_update_data = &req_isp_new->hw_update_data;
  915. req_isp_old->hw_update_data.frame_header_res_id =
  916. req_isp_new->hw_update_data.frame_header_res_id;
  917. req_isp_old->hw_update_data.frame_header_cpu_addr =
  918. hw_update_data->frame_header_cpu_addr;
  919. if (req_isp_new->hw_update_data.mup_en) {
  920. req_isp_old->hw_update_data.mup_en =
  921. req_isp_new->hw_update_data.mup_en;
  922. req_isp_old->hw_update_data.mup_val =
  923. req_isp_new->hw_update_data.mup_val;
  924. req_isp_old->hw_update_data.num_exp =
  925. req_isp_new->hw_update_data.num_exp;
  926. }
  927. req_old->request_id = req->request_id;
  928. list_add_tail(&req->list, &ctx->free_req_list);
  929. }
  930. } else {
  931. CAM_WARN(CAM_ISP,
  932. "Received Update pkt before INIT pkt. req_id= %lld",
  933. req->request_id);
  934. rc = -EINVAL;
  935. }
  936. end:
  937. spin_unlock_bh(&ctx->lock);
  938. return rc;
  939. }
940. static const char *__cam_isp_ife_sfe_resource_handle_id_to_type(
  941. uint32_t resource_handle)
  942. {
  943. switch (resource_handle) {
  944. /* IFE output ports */
  945. case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL";
  946. case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4";
  947. case CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16";
  948. case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP";
  949. case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD";
  950. case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF";
  951. case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0";
  952. case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1";
  953. case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2";
  954. case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3";
  955. case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE";
  956. case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST";
  957. case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG";
  958. case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF";
  959. case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG";
  960. case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST";
  961. case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS";
  962. case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS";
  963. case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST";
  964. case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP";
  965. case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP";
  966. case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP";
  967. case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD";
  968. case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR";
  969. case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW";
  970. case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD";
  971. case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE";
  972. case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS";
  973. case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST";
  974. case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG";
  975. case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW";
  976. case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD";
  977. case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF";
  978. case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS";
  979. case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA";
  980. case CAM_ISP_IFE_OUT_RES_STATS_ALSC: return "IFE_STATS_ALSC";
  981. /* SFE output ports */
  982. case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0";
  983. case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1";
  984. case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2";
  985. case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3";
  986. case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4";
  987. case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0";
  988. case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1";
  989. case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2";
  990. case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0";
  991. case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1";
  992. case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2";
  993. case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR";
  994. case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW";
  995. case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR";
  996. case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0";
  997. case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1";
  998. case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2";
  999. case CAM_ISP_SFE_OUT_HDR_STATS: return "HDR_STATS";
  1000. /* SFE input ports */
  1001. case CAM_ISP_SFE_IN_RD_0: return "SFE_RD_0";
  1002. case CAM_ISP_SFE_IN_RD_1: return "SFE_RD_1";
  1003. case CAM_ISP_SFE_IN_RD_2: return "SFE_RD_2";
  1004. /* Handle invalid type */
  1005. default: return "Invalid_Resource_Type";
  1006. }
  1007. }
  1008. static const char *__cam_isp_tfe_resource_handle_id_to_type(
  1009. uint32_t resource_handle)
  1010. {
  1011. switch (resource_handle) {
  1012. /* TFE output ports */
  1013. case CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL";
  1014. case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP";
  1015. case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF";
  1016. case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0";
  1017. case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1";
  1018. case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2";
  1019. case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE";
  1020. case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST";
  1021. case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG";
  1022. case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF";
  1023. case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG";
  1024. case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS";
  1025. case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4";
  1026. case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16";
  1027. case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI";
  1028. /* Handle invalid type */
  1029. default: return "Invalid_Resource_Type";
  1030. }
  1031. }
  1032. static const char *__cam_isp_resource_handle_id_to_type(
  1033. uint32_t device_type, uint32_t resource_handle)
  1034. {
  1035. switch (device_type) {
  1036. case CAM_IFE_DEVICE_TYPE:
  1037. return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle);
  1038. case CAM_TFE_DEVICE_TYPE:
  1039. return __cam_isp_tfe_resource_handle_id_to_type(resource_handle);
  1040. default:
  1041. return "INVALID_DEV_TYPE";
  1042. }
  1043. }
  1044. static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
  1045. {
  1046. uint64_t ts = 0;
  1047. if (!evt_data)
  1048. return 0;
  1049. switch (evt_id) {
  1050. case CAM_ISP_HW_EVENT_ERROR:
  1051. ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
  1052. timestamp;
  1053. break;
  1054. case CAM_ISP_HW_EVENT_SOF:
  1055. ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
  1056. timestamp;
  1057. break;
  1058. case CAM_ISP_HW_EVENT_REG_UPDATE:
  1059. ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
  1060. timestamp;
  1061. break;
  1062. case CAM_ISP_HW_EVENT_EPOCH:
  1063. ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
  1064. timestamp;
  1065. break;
  1066. case CAM_ISP_HW_EVENT_EOF:
  1067. ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
  1068. timestamp;
  1069. break;
  1070. case CAM_ISP_HW_EVENT_DONE:
  1071. case CAM_ISP_HW_SECONDARY_EVENT:
  1072. break;
  1073. default:
  1074. CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
  1075. }
  1076. return ts;
  1077. }
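/*
 * __cam_isp_ctx_get_hw_timestamp()
 *
 * Query the HW manager for the previous and current SOF timestamps plus
 * the boot-time timestamp. A previous timestamp that is not strictly
 * older than the current one is rejected as invalid.
 */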
  1078. static int __cam_isp_ctx_get_hw_timestamp(struct cam_context *ctx, uint64_t *prev_ts,
  1079. uint64_t *curr_ts, uint64_t *boot_ts)
  1080. {
  1081. struct cam_hw_cmd_args hw_cmd_args;
  1082. struct cam_isp_hw_cmd_args isp_hw_cmd_args;
  1083. int rc;
  1084. hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
  1085. hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
  1086. hw_cmd_args.u.internal_args = &isp_hw_cmd_args;
  1087. isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_SOF_TS;
1088. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
  1089. if (rc)
  1090. return rc;
  1091. if (isp_hw_cmd_args.u.sof_ts.prev >= isp_hw_cmd_args.u.sof_ts.curr) {
  1092. CAM_ERR(CAM_ISP, "ctx:%u previous timestamp is greater than current timestamp",
  1093. ctx->ctx_id);
  1094. return -EINVAL;
  1095. }
  1096. *prev_ts = isp_hw_cmd_args.u.sof_ts.prev;
  1097. *curr_ts = isp_hw_cmd_args.u.sof_ts.curr;
  1098. *boot_ts = isp_hw_cmd_args.u.sof_ts.boot;
  1099. return 0;
  1100. }
  1101. static int __cam_isp_ctx_recover_sof_timestamp(struct cam_context *ctx, uint64_t request_id)
  1102. {
  1103. struct cam_isp_context *ctx_isp = ctx->ctx_priv;
  1104. uint64_t prev_ts, curr_ts, boot_ts;
  1105. uint64_t a, b, c;
  1106. int rc;
  1107. rc = __cam_isp_ctx_get_hw_timestamp(ctx, &prev_ts, &curr_ts, &boot_ts);
  1108. if (rc) {
  1109. CAM_ERR(CAM_ISP, "ctx:%u Failed to get timestamp from HW", ctx->ctx_id);
  1110. return rc;
  1111. }
  1112. /**
  1113. * If the last received SOF was for frame A and we have missed the SOF for frame B,
  1114. * then we need to find out if the hardware is at frame B or C.
  1115. * +-----+-----+-----+
  1116. * | A | B | C |
  1117. * +-----+-----+-----+
  1118. */
  1119. a = ctx_isp->sof_timestamp_val;
  1120. if (a == prev_ts) {
  1121. /* Hardware is at frame B */
  1122. b = curr_ts;
  1123. CAM_DBG(CAM_ISP, "ctx:%u recovered timestamp (last:0x%llx, curr:0x%llx) req: %llu",
  1124. ctx->ctx_id, a, b, request_id);
  1125. } else if (a < prev_ts) {
  1126. /* Hardware is at frame C */
  1127. b = prev_ts;
  1128. c = curr_ts;
  1129. CAM_DBG(CAM_ISP,
  1130. "ctx:%u recovered timestamp (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
  1131. ctx->ctx_id, a, b, c, request_id);
  1132. } else {
  1133. /* Hardware is at frame A (which we supposedly missed) */
  1134. CAM_ERR_RATE_LIMIT(CAM_ISP,
  1135. "ctx:%u erroneous call to SOF recovery (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
  1136. ctx->ctx_id, a, prev_ts, curr_ts, request_id);
  1137. return 0;
  1138. }
  1139. ctx_isp->boot_timestamp = boot_ts + (b - curr_ts);
  1140. ctx_isp->sof_timestamp_val = b;
  1141. ctx_isp->frame_id++;
  1142. return 0;
  1143. }
  1144. static void __cam_isp_ctx_send_sof_boot_timestamp(
  1145. struct cam_isp_context *ctx_isp, uint64_t request_id,
  1146. uint32_t sof_event_status)
  1147. {
  1148. struct cam_req_mgr_message req_msg;
  1149. req_msg.session_hdl = ctx_isp->base->session_hdl;
  1150. req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
  1151. req_msg.u.frame_msg.request_id = request_id;
  1152. req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
  1153. req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
  1154. req_msg.u.frame_msg.sof_status = sof_event_status;
  1155. req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;
  1156. CAM_DBG(CAM_ISP,
  1157. "request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
  1158. request_id, ctx_isp->frame_id,
  1159. ctx_isp->boot_timestamp, sof_event_status);
  1160. if (cam_req_mgr_notify_message(&req_msg,
  1161. V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
  1162. V4L_EVENT_CAM_REQ_MGR_EVENT))
  1163. CAM_ERR(CAM_ISP,
  1164. "Error in notifying the boot time for req id:%lld",
  1165. request_id);
  1166. }
  1167. static void __cam_isp_ctx_send_unified_timestamp(
  1168. struct cam_isp_context *ctx_isp, uint64_t request_id)
  1169. {
  1170. struct cam_req_mgr_message req_msg;
  1171. req_msg.session_hdl = ctx_isp->base->session_hdl;
  1172. req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id;
  1173. req_msg.u.frame_msg_v2.request_id = request_id;
  1174. req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] =
  1175. (request_id == 0) ? 0 : ctx_isp->sof_timestamp_val;
  1176. req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] = ctx_isp->boot_timestamp;
  1177. req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl;
  1178. req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta;
  1179. CAM_DBG(CAM_ISP,
  1180. "link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d\
  1181. boot time stamp:0x%llx", ctx_isp->base->link_hdl, request_id,
  1182. ctx_isp->frame_id, ctx_isp->sof_timestamp_val,ctx_isp->base->ctx_id,
  1183. ctx_isp->boot_timestamp);
  1184. if (cam_req_mgr_notify_message(&req_msg,
  1185. V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT))
  1186. CAM_ERR(CAM_ISP,
  1187. "Error in notifying the sof and boot time for req id:%lld",
  1188. request_id);
  1189. }
  1190. static void __cam_isp_ctx_send_sof_timestamp_frame_header(
  1191. struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
  1192. uint64_t request_id, uint32_t sof_event_status)
  1193. {
  1194. uint32_t *time32 = NULL;
  1195. uint64_t timestamp = 0;
  1196. struct cam_req_mgr_message req_msg;
  1197. time32 = frame_header_cpu_addr;
  1198. timestamp = (uint64_t) time32[1];
  1199. timestamp = timestamp << 24;
  1200. timestamp |= (uint64_t)(time32[0] >> 8);
  1201. timestamp = mul_u64_u32_div(timestamp,
  1202. CAM_IFE_QTIMER_MUL_FACTOR,
  1203. CAM_IFE_QTIMER_DIV_FACTOR);
  1204. ctx_isp->sof_timestamp_val = timestamp;
  1205. req_msg.session_hdl = ctx_isp->base->session_hdl;
  1206. req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
  1207. req_msg.u.frame_msg.request_id = request_id;
  1208. req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
  1209. req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
  1210. req_msg.u.frame_msg.sof_status = sof_event_status;
  1211. CAM_DBG(CAM_ISP,
  1212. "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
  1213. request_id, ctx_isp->frame_id,
  1214. ctx_isp->sof_timestamp_val, sof_event_status);
  1215. if (cam_req_mgr_notify_message(&req_msg,
  1216. V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
  1217. CAM_ERR(CAM_ISP,
  1218. "Error in notifying the sof time for req id:%lld",
  1219. request_id);
  1220. }
  1221. static void __cam_isp_ctx_send_sof_timestamp(
  1222. struct cam_isp_context *ctx_isp, uint64_t request_id,
  1223. uint32_t sof_event_status)
  1224. {
  1225. struct cam_req_mgr_message req_msg;
  1226. struct cam_context *ctx = ctx_isp->base;
  1227. if (ctx_isp->reported_frame_id == ctx_isp->frame_id) {
  1228. if (__cam_isp_ctx_recover_sof_timestamp(ctx_isp->base, request_id))
  1229. CAM_WARN(CAM_ISP, "Missed SOF. Unable to recover SOF timestamp.");
  1230. }
  1231. if (request_id == 0 && (ctx_isp->reported_frame_id == ctx_isp->frame_id)) {
  1232. CAM_WARN_RATE_LIMIT(CAM_ISP,
  1233. "Missed SOF Recovery for invalid req, Skip notificaiton to userspace Ctx: %u frame_id %u",
  1234. ctx->ctx_id, ctx_isp->frame_id);
  1235. return;
  1236. }
  1237. ctx_isp->reported_frame_id = ctx_isp->frame_id;
  1238. if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS))
  1239. && !ctx_isp->use_frame_header_ts) {
1240. __cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id);
  1241. return;
  1242. }
  1243. if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
  1244. goto end;
  1245. req_msg.session_hdl = ctx_isp->base->session_hdl;
  1246. req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
  1247. req_msg.u.frame_msg.request_id = request_id;
  1248. req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
  1249. req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
  1250. req_msg.u.frame_msg.sof_status = sof_event_status;
  1251. req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;
  1252. CAM_DBG(CAM_ISP,
  1253. "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
  1254. request_id, ctx_isp->frame_id,
  1255. ctx_isp->sof_timestamp_val, sof_event_status);
  1256. if (cam_req_mgr_notify_message(&req_msg,
  1257. V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
  1258. CAM_ERR(CAM_ISP,
  1259. "Error in notifying the sof time for req id:%lld",
  1260. request_id);
  1261. end:
  1262. __cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
  1263. request_id, sof_event_status);
  1264. }
  1265. static void __cam_isp_ctx_handle_buf_done_fail_log(
  1266. struct cam_isp_context *ctx_isp, uint64_t request_id,
  1267. struct cam_isp_ctx_req *req_isp)
  1268. {
  1269. int i;
  1270. struct cam_context *ctx = ctx_isp->base;
  1271. const char *handle_type;
  1272. if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
  1273. CAM_ERR(CAM_ISP,
  1274. "Num Resources exceed mMAX %d >= %d ",
  1275. req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
  1276. return;
  1277. }
  1278. CAM_WARN_RATE_LIMIT(CAM_ISP,
  1279. "Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
  1280. request_id, req_isp->num_fence_map_out, req_isp->num_acked,
  1281. req_isp->bubble_report, req_isp->bubble_detected);
  1282. CAM_WARN_RATE_LIMIT(CAM_ISP,
  1283. "Resource Handles that fail to generate buf_done in prev frame");
  1284. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  1285. if (req_isp->fence_map_out[i].sync_id != -1) {
  1286. handle_type = __cam_isp_resource_handle_id_to_type(
  1287. ctx_isp->isp_device_type,
  1288. req_isp->fence_map_out[i].resource_handle);
  1289. trace_cam_log_event("Buf_done Congestion",
  1290. handle_type, request_id, req_isp->fence_map_out[i].sync_id);
  1291. CAM_WARN_RATE_LIMIT(CAM_ISP,
  1292. "Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
  1293. handle_type,
  1294. req_isp->fence_map_out[i].resource_handle,
  1295. req_isp->fence_map_out[i].sync_id);
  1296. }
  1297. }
  1298. if (!ctx_isp->sof_dbg_irq_en)
  1299. __cam_isp_ctx_handle_sof_freeze_evt(ctx);
  1300. }
  1301. static void __cam_isp_context_reset_internal_recovery_params(
  1302. struct cam_isp_context *ctx_isp)
  1303. {
  1304. atomic_set(&ctx_isp->internal_recovery_set, 0);
  1305. atomic_set(&ctx_isp->process_bubble, 0);
  1306. ctx_isp->recovery_req_id = 0;
  1307. ctx_isp->aeb_error_cnt = 0;
  1308. ctx_isp->bubble_frame_cnt = 0;
  1309. ctx_isp->sof_dbg_irq_en = false;
  1310. }
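/*
 * __cam_isp_context_try_internal_recovery()
 *
 * Attempt bubble-based internal recovery for the saved recovery_req_id by
 * looking for it first in the wait list, then in the pending list. On a
 * successful CRM bubble notification the context moves to the BUBBLE
 * substate; otherwise recovery state is reset back to normal.
 */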
  1311. static int __cam_isp_context_try_internal_recovery(
  1312. struct cam_isp_context *ctx_isp)
  1313. {
  1314. int rc = 0;
  1315. struct cam_context *ctx = ctx_isp->base;
  1316. struct cam_ctx_request *req;
  1317. struct cam_isp_ctx_req *req_isp;
1318. /*
1319. * Start with the wait list. If recovery is still set, the
1320. * errored request has not been moved to the pending list yet,
1321. * i.e. buf done for the errored request has not occurred;
1322. * recover from here.
1323. */
  1324. if (!list_empty(&ctx->wait_req_list)) {
  1325. req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
  1326. req_isp = (struct cam_isp_ctx_req *)req->req_priv;
  1327. if (req->request_id == ctx_isp->recovery_req_id) {
  1328. rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
  1329. CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
  1330. if (rc) {
  1331. /* Unable to do bubble recovery reset back to normal */
  1332. CAM_WARN(CAM_ISP,
  1333. "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
  1334. req->request_id, ctx->ctx_id, ctx->link_hdl);
  1335. __cam_isp_context_reset_internal_recovery_params(ctx_isp);
  1336. req_isp->bubble_detected = false;
  1337. goto end;
  1338. }
  1339. list_del_init(&req->list);
  1340. list_add(&req->list, &ctx->pending_req_list);
  1341. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
  1342. CAM_INFO(CAM_ISP,
  1343. "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
  1344. ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
  1345. goto end;
  1346. }
  1347. }
1348. /*
1349. * If not in the wait list, the only other possibility is that the
1350. * request is in the pending list. On error detection bubble detect
1351. * is set; once a new frame comes in after detection and an RUP fires,
1352. * the request moves to the active list and finishes with its buf dones.
1353. */
  1354. if (!list_empty(&ctx->pending_req_list)) {
  1355. req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
  1356. req_isp = (struct cam_isp_ctx_req *)req->req_priv;
  1357. if (req->request_id == ctx_isp->recovery_req_id) {
  1358. rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
  1359. CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
  1360. if (rc) {
  1361. /* Unable to do bubble recovery reset back to normal */
  1362. CAM_WARN(CAM_ISP,
  1363. "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
  1364. req->request_id, ctx->ctx_id, ctx->link_hdl);
  1365. __cam_isp_context_reset_internal_recovery_params(ctx_isp);
  1366. req_isp->bubble_detected = false;
  1367. goto end;
  1368. }
  1369. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
  1370. CAM_INFO(CAM_ISP,
  1371. "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
  1372. ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
  1373. goto end;
  1374. }
  1375. }
  1376. /* If request is not found in either of the lists skip recovery */
  1377. __cam_isp_context_reset_internal_recovery_params(ctx_isp);
  1378. end:
  1379. return rc;
  1380. }
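/*
 * __cam_isp_ctx_handle_buf_done_for_req_list()
 *
 * Retire a request once all of its buf dones are accounted for: a request
 * in bubble with report enabled goes back to the pending list for
 * re-apply (or to the free list with error-signalled fences if it was
 * already flushed); otherwise it is moved to the free list and the SOF
 * timestamp is reported. Also kicks internal recovery once the active
 * list drains while recovery is pending.
 */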
  1381. static int __cam_isp_ctx_handle_buf_done_for_req_list(
  1382. struct cam_isp_context *ctx_isp,
  1383. struct cam_ctx_request *req)
  1384. {
  1385. int rc = 0, i;
  1386. uint64_t buf_done_req_id;
  1387. struct cam_isp_ctx_req *req_isp;
  1388. struct cam_context *ctx = ctx_isp->base;
  1389. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  1390. ctx_isp->active_req_cnt--;
  1391. buf_done_req_id = req->request_id;
  1392. if (req_isp->bubble_detected && req_isp->bubble_report) {
  1393. req_isp->num_acked = 0;
  1394. req_isp->num_deferred_acks = 0;
  1395. req_isp->bubble_detected = false;
  1396. list_del_init(&req->list);
  1397. atomic_set(&ctx_isp->process_bubble, 0);
  1398. req_isp->cdm_reset_before_apply = false;
  1399. ctx_isp->bubble_frame_cnt = 0;
  1400. if (buf_done_req_id <= ctx->last_flush_req) {
  1401. for (i = 0; i < req_isp->num_fence_map_out; i++)
  1402. rc = cam_sync_signal(
  1403. req_isp->fence_map_out[i].sync_id,
  1404. CAM_SYNC_STATE_SIGNALED_ERROR,
  1405. CAM_SYNC_ISP_EVENT_BUBBLE);
  1406. list_add_tail(&req->list, &ctx->free_req_list);
  1407. CAM_DBG(CAM_REQ,
  1408. "Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
  1409. buf_done_req_id, ctx_isp->active_req_cnt,
  1410. ctx->ctx_id);
  1411. ctx_isp->last_bufdone_err_apply_req_id = 0;
  1412. } else {
  1413. list_add(&req->list, &ctx->pending_req_list);
  1414. CAM_DBG(CAM_REQ,
  1415. "Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
  1416. req->request_id, ctx_isp->active_req_cnt,
  1417. ctx->ctx_id);
  1418. }
  1419. } else {
  1420. if (!ctx_isp->use_frame_header_ts) {
  1421. if (ctx_isp->reported_req_id < buf_done_req_id) {
  1422. ctx_isp->reported_req_id = buf_done_req_id;
  1423. __cam_isp_ctx_send_sof_timestamp(ctx_isp,
  1424. buf_done_req_id,
  1425. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  1426. }
  1427. }
  1428. list_del_init(&req->list);
  1429. list_add_tail(&req->list, &ctx->free_req_list);
  1430. req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
  1431. req_isp->cdm_reset_before_apply = false;
  1432. req_isp->num_acked = 0;
  1433. req_isp->num_deferred_acks = 0;
  1434. /*
  1435. * Only update the process_bubble and bubble_frame_cnt
  1436. * when bubble is detected on this req, in case the other
  1437. * request is processing bubble.
  1438. */
  1439. if (req_isp->bubble_detected) {
  1440. atomic_set(&ctx_isp->process_bubble, 0);
  1441. ctx_isp->bubble_frame_cnt = 0;
  1442. req_isp->bubble_detected = false;
  1443. }
  1444. CAM_DBG(CAM_REQ,
  1445. "Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
  1446. buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
  1447. ctx_isp->req_info.last_bufdone_req_id = req->request_id;
  1448. ctx_isp->last_bufdone_err_apply_req_id = 0;
  1449. }
  1450. if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
  1451. __cam_isp_context_try_internal_recovery(ctx_isp);
  1452. cam_cpas_notify_event("IFE BufDone", buf_done_req_id);
  1453. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  1454. CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
  1455. __cam_isp_ctx_update_event_record(ctx_isp,
  1456. CAM_ISP_CTX_EVENT_BUFDONE, req);
  1457. return rc;
  1458. }
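/*
 * __cam_isp_ctx_handle_buf_done_for_request()
 *
 * Match a buf done event against the request's fence map and signal the
 * corresponding fences with success, or with error when a bubble was
 * detected but is not being reported; when a bubble is being reported the
 * done is only counted so the request can be re-applied. Events that
 * match no output port are handed back via done_next_req so the caller
 * can try the next request in the active list.
 */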
  1459. static int __cam_isp_ctx_handle_buf_done_for_request(
  1460. struct cam_isp_context *ctx_isp,
  1461. struct cam_ctx_request *req,
  1462. struct cam_isp_hw_done_event_data *done,
  1463. uint32_t bubble_state,
  1464. struct cam_isp_hw_done_event_data *done_next_req)
  1465. {
  1466. int rc = 0;
  1467. int i, j;
  1468. struct cam_isp_ctx_req *req_isp;
  1469. struct cam_context *ctx = ctx_isp->base;
  1470. const char *handle_type;
  1471. struct cam_isp_context_comp_record *comp_grp = NULL;
  1472. trace_cam_buf_done("ISP", ctx, req);
  1473. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  1474. CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
  1475. bubble_state, req_isp->bubble_detected);
  1476. done_next_req->resource_handle = 0;
  1477. done_next_req->timestamp = done->timestamp;
  1478. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  1479. if (done->resource_handle ==
  1480. req_isp->fence_map_out[i].resource_handle)
  1481. break;
  1482. }
  1483. if (i == req_isp->num_fence_map_out) {
  1484. /*
  1485. * If not found in current request, it could be
  1486. * belonging to next request, this can happen if
  1487. * IRQ delay happens. It is only valid when the
  1488. * platform doesn't have last consumed address.
  1489. */
  1490. CAM_WARN(CAM_ISP,
  1491. "BUF_DONE for res %s not found in Req %lld ",
  1492. __cam_isp_resource_handle_id_to_type(
  1493. ctx_isp->isp_device_type,
  1494. done->resource_handle),
  1495. req->request_id);
  1496. done_next_req->hw_type = done->hw_type;
  1497. done_next_req->resource_handle = done->resource_handle;
  1498. done_next_req->comp_group_id = done->comp_group_id;
  1499. goto check_deferred;
  1500. }
  1501. if (done->hw_type == CAM_ISP_HW_TYPE_SFE)
  1502. comp_grp = &ctx_isp->sfe_bus_comp_grp[done->comp_group_id];
  1503. else
  1504. comp_grp = &ctx_isp->vfe_bus_comp_grp[done->comp_group_id];
  1505. if (!comp_grp) {
  1506. CAM_ERR(CAM_ISP, "comp_grp is NULL");
  1507. rc = -EINVAL;
  1508. return rc;
  1509. }
  1510. for (i = 0; i < comp_grp->num_res; i++) {
  1511. for (j = 0; j < req_isp->num_fence_map_out; j++) {
  1512. if (comp_grp->res_id[i] ==
  1513. req_isp->fence_map_out[j].resource_handle)
  1514. break;
  1515. }
  1516. if (j == req_isp->num_fence_map_out) {
  1517. /*
  1518. * If not found in current request, it could be
  1519. * belonging to an active port with no valid fence
  1520. * bound to it, we needn't process it.
  1521. */
  1522. CAM_DBG(CAM_ISP,
  1523. "BUF_DONE for res %s not active in Req %lld ",
  1524. __cam_isp_resource_handle_id_to_type(
  1525. ctx_isp->isp_device_type,
  1526. comp_grp->res_id[i]),
  1527. req->request_id);
  1528. continue;
  1529. }
  1530. if (req_isp->fence_map_out[j].sync_id == -1) {
  1531. handle_type =
  1532. __cam_isp_resource_handle_id_to_type(
  1533. ctx_isp->isp_device_type,
  1534. req_isp->fence_map_out[j].resource_handle);
  1535. CAM_WARN(CAM_ISP,
  1536. "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
  1537. req->request_id, i, j, handle_type);
  1538. trace_cam_log_event("Duplicate BufDone",
  1539. handle_type, req->request_id, ctx->ctx_id);
  1540. continue;
  1541. }
  1542. /* Get buf handles from packet and retrieve them from presil framework */
  1543. if (cam_presil_mode_enabled()) {
  1544. rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
  1545. ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
  1546. if (rc) {
  1547. CAM_ERR(CAM_ISP,
  1548. "Failed to retrieve image buffers req_id:%d ctx_id:%d bubble detected:%d rc:%d",
  1549. req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
  1550. return rc;
  1551. }
  1552. }
  1553. if (!req_isp->bubble_detected) {
  1554. CAM_DBG(CAM_ISP,
  1555. "Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
  1556. req->request_id,
  1557. req_isp->fence_map_out[j].resource_handle,
  1558. req_isp->fence_map_out[j].sync_id,
  1559. ctx->ctx_id);
  1560. rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
  1561. CAM_SYNC_STATE_SIGNALED_SUCCESS,
  1562. CAM_SYNC_COMMON_EVENT_SUCCESS);
  1563. if (rc)
  1564. CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
  1565. rc);
  1566. } else if (!req_isp->bubble_report) {
  1567. CAM_DBG(CAM_ISP,
  1568. "Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
  1569. req->request_id,
  1570. req_isp->fence_map_out[j].resource_handle,
  1571. req_isp->fence_map_out[j].sync_id,
  1572. ctx->ctx_id);
  1573. rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
  1574. CAM_SYNC_STATE_SIGNALED_ERROR,
  1575. CAM_SYNC_ISP_EVENT_BUBBLE);
  1576. if (rc)
  1577. CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
  1578. rc);
  1579. } else {
  1580. /*
  1581. * Ignore the buffer done if bubble detect is on
  1582. * Increment the ack number here, and queue the
  1583. * request back to pending list whenever all the
  1584. * buffers are done.
  1585. */
  1586. req_isp->num_acked++;
  1587. CAM_DBG(CAM_ISP,
  1588. "buf done with bubble state %d recovery %d for req %lld, ctx %u",
  1589. bubble_state,
  1590. req_isp->bubble_report,
  1591. req->request_id,
  1592. ctx->ctx_id);
  1593. continue;
  1594. }
  1595. CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
  1596. req->request_id,
  1597. req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
  1598. if (!rc) {
  1599. req_isp->num_acked++;
  1600. req_isp->fence_map_out[j].sync_id = -1;
  1601. }
  1602. if ((ctx_isp->use_frame_header_ts) &&
  1603. (req_isp->hw_update_data.frame_header_res_id ==
  1604. req_isp->fence_map_out[j].resource_handle))
  1605. __cam_isp_ctx_send_sof_timestamp_frame_header(
  1606. ctx_isp,
  1607. req_isp->hw_update_data.frame_header_cpu_addr,
  1608. req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  1609. }
  1610. check_deferred:
  1611. if (req_isp->num_acked > req_isp->num_fence_map_out) {
  1612. /* Should not happen */
  1613. CAM_ERR(CAM_ISP,
  1614. "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
  1615. req->request_id, req_isp->num_acked,
  1616. req_isp->num_fence_map_out, ctx->ctx_id);
  1617. WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
  1618. }
  1619. if (req_isp->num_acked != req_isp->num_fence_map_out)
  1620. return rc;
  1621. rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
  1622. return rc;
  1623. }
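/*
 * __cam_isp_handle_deferred_buf_done()
 *
 * Flush buf done acks that were deferred while the request sat in the
 * wait list. When bubble_handling is set, only num_acked is incremented;
 * otherwise each deferred fence is signalled with the given status and
 * event cause.
 */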
  1624. static int __cam_isp_handle_deferred_buf_done(
  1625. struct cam_isp_context *ctx_isp,
  1626. struct cam_ctx_request *req,
  1627. bool bubble_handling,
  1628. uint32_t status, uint32_t event_cause)
  1629. {
  1630. int i, j;
  1631. int rc = 0;
  1632. struct cam_isp_ctx_req *req_isp =
  1633. (struct cam_isp_ctx_req *) req->req_priv;
  1634. struct cam_context *ctx = ctx_isp->base;
  1635. CAM_DBG(CAM_ISP,
  1636. "ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
  1637. ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
  1638. req_isp->num_acked, bubble_handling);
  1639. for (i = 0; i < req_isp->num_deferred_acks; i++) {
  1640. j = req_isp->deferred_fence_map_index[i];
  1641. CAM_DBG(CAM_ISP,
  1642. "ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
  1643. ctx->ctx_id, status, event_cause,
  1644. req->request_id,
  1645. req_isp->fence_map_out[j].resource_handle,
  1646. req_isp->fence_map_out[j].sync_id);
  1647. if (req_isp->fence_map_out[j].sync_id == -1) {
  1648. CAM_WARN(CAM_ISP,
  1649. "ctx[%d Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
  1650. ctx->ctx_id, req->request_id, j,
  1651. req_isp->fence_map_out[j].resource_handle);
  1652. continue;
  1653. }
  1654. if (!bubble_handling) {
  1655. CAM_WARN(CAM_ISP,
  1656. "Unexpected Buf done for res=0x%x on ctx[%d] for Req %llu, status=%d, possible bh delays",
  1657. req_isp->fence_map_out[j].resource_handle, ctx->ctx_id,
  1658. req->request_id, status);
  1659. rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
  1660. status, event_cause);
  1661. if (rc) {
  1662. CAM_ERR(CAM_ISP,
  1663. "ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
  1664. ctx->ctx_id, req->request_id,
  1665. req_isp->fence_map_out[j].sync_id,
  1666. status, rc);
  1667. } else {
  1668. req_isp->num_acked++;
  1669. req_isp->fence_map_out[j].sync_id = -1;
  1670. }
  1671. } else {
  1672. req_isp->num_acked++;
  1673. }
  1674. }
  1675. CAM_DBG(CAM_ISP,
  1676. "ctx[%d] : Req %llu : Handled %d deferred buf_dones num_acked=%d, num_fence_map_out=%d",
  1677. ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
  1678. req_isp->num_acked, req_isp->num_fence_map_out);
  1679. req_isp->num_deferred_acks = 0;
  1680. return rc;
  1681. }
  1682. static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble(
  1683. struct cam_isp_context *ctx_isp,
  1684. struct cam_ctx_request *req)
  1685. {
  1686. int rc = 0;
  1687. struct cam_context *ctx = ctx_isp->base;
  1688. struct cam_isp_ctx_req *req_isp;
  1689. req_isp = (struct cam_isp_ctx_req *)req->req_priv;
  1690. if (req_isp->num_deferred_acks)
  1691. rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req,
  1692. req_isp->bubble_report,
  1693. CAM_SYNC_STATE_SIGNALED_ERROR,
  1694. CAM_SYNC_ISP_EVENT_BUBBLE);
  1695. if (req_isp->num_acked > req_isp->num_fence_map_out) {
  1696. /* Should not happen */
  1697. CAM_ERR(CAM_ISP,
  1698. "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
  1699. req->request_id, req_isp->num_acked,
  1700. req_isp->num_fence_map_out, ctx->ctx_id);
  1701. WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
  1702. }
  1703. if (req_isp->num_acked == req_isp->num_fence_map_out)
  1704. rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
  1705. return rc;
  1706. }
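/*
 * __cam_isp_ctx_handle_buf_done_for_request_verify_addr()
 *
 * Variant of buf done handling that additionally matches on the last
 * consumed address when verify_consumed_addr is set, and that can defer
 * fence signalling (defer_buf_done) for requests still in the wait list.
 * Unmatched events are forwarded to the deferred buf done checker.
 */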
  1707. static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
  1708. struct cam_isp_context *ctx_isp,
  1709. struct cam_ctx_request *req,
  1710. struct cam_isp_hw_done_event_data *done,
  1711. uint32_t bubble_state,
  1712. bool verify_consumed_addr,
  1713. bool defer_buf_done)
  1714. {
  1715. int rc = 0;
  1716. int i, j;
  1717. struct cam_isp_ctx_req *req_isp;
  1718. struct cam_context *ctx = ctx_isp->base;
  1719. const char *handle_type;
  1720. uint32_t cmp_addr = 0;
  1721. struct cam_isp_hw_done_event_data unhandled_done = {0};
  1722. struct cam_isp_context_comp_record *comp_grp = NULL;
  1723. trace_cam_buf_done("ISP", ctx, req);
  1724. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  1725. CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
  1726. bubble_state, req_isp->bubble_detected);
  1727. unhandled_done.timestamp = done->timestamp;
  1728. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  1729. if (done->resource_handle ==
  1730. req_isp->fence_map_out[i].resource_handle) {
  1731. cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
  1732. req_isp->fence_map_out[i].image_buf_addr[0]) :
  1733. req_isp->fence_map_out[i].image_buf_addr[0];
  1734. if (!verify_consumed_addr ||
1735. (done->last_consumed_addr == cmp_addr)) {
  1736. break;
  1737. }
  1738. }
  1739. }
  1740. if (i == req_isp->num_fence_map_out) {
  1741. /*
  1742. * If not found in current request, it could be
  1743. * belonging to next request, this can happen if
  1744. * IRQ delay happens. It is only valid when the
  1745. * platform doesn't have last consumed address.
  1746. */
  1747. CAM_WARN(CAM_ISP,
  1748. "BUF_DONE for res %s not found in Req %lld ",
  1749. __cam_isp_resource_handle_id_to_type(
  1750. ctx_isp->isp_device_type, done->resource_handle),
  1751. req->request_id);
  1752. unhandled_done.hw_type = done->hw_type;
  1753. unhandled_done.resource_handle = done->resource_handle;
  1754. unhandled_done.comp_group_id = done->comp_group_id;
  1755. unhandled_done.last_consumed_addr = done->last_consumed_addr;
  1756. goto check_deferred;
  1757. }
  1758. if (done->hw_type == CAM_ISP_HW_TYPE_SFE)
  1759. comp_grp = &ctx_isp->sfe_bus_comp_grp[done->comp_group_id];
  1760. else
  1761. comp_grp = &ctx_isp->vfe_bus_comp_grp[done->comp_group_id];
  1762. if (!comp_grp) {
  1763. CAM_ERR(CAM_ISP, "comp_grp is NULL");
  1764. rc = -EINVAL;
  1765. return rc;
  1766. }
  1767. for (i = 0; i < comp_grp->num_res; i++) {
  1768. for (j = 0; j < req_isp->num_fence_map_out; j++) {
  1769. if (comp_grp->res_id[i] ==
  1770. req_isp->fence_map_out[j].resource_handle)
  1771. break;
  1772. }
  1773. if (j == req_isp->num_fence_map_out) {
  1774. /*
  1775. * If not found in current request, it could be
  1776. * belonging to an active port with no valid fence
  1777. * bound to it, we needn't process it.
  1778. */
  1779. CAM_DBG(CAM_ISP,
  1780. "BUF_DONE for res %s not active in Req %lld ",
  1781. __cam_isp_resource_handle_id_to_type(
  1782. ctx_isp->isp_device_type, comp_grp->res_id[i]),
  1783. req->request_id);
  1784. continue;
  1785. }
  1786. if (req_isp->fence_map_out[j].sync_id == -1) {
  1787. handle_type = __cam_isp_resource_handle_id_to_type(
  1788. ctx_isp->isp_device_type,
  1789. req_isp->fence_map_out[j].resource_handle);
  1790. CAM_WARN(CAM_ISP,
  1791. "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
  1792. req->request_id, i, j, handle_type);
  1793. trace_cam_log_event("Duplicate BufDone",
  1794. handle_type, req->request_id, ctx->ctx_id);
  1795. continue;
  1796. }
  1797. /* Get buf handles from packet and retrieve them from presil framework */
  1798. if (cam_presil_mode_enabled()) {
  1799. rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
  1800. ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
  1801. if (rc) {
  1802. CAM_ERR(CAM_ISP,
  1803. "Failed to retrieve image buffers req_id:%d ctx_id:%d bubble detected:%d rc:%d",
  1804. req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
  1805. return rc;
  1806. }
  1807. }
  1808. if (defer_buf_done) {
  1809. uint32_t deferred_indx = req_isp->num_deferred_acks;
  1810. /*
  1811. * If we are handling this BUF_DONE event for a request
  1812. * that is still in wait_list, do not signal now,
  1813. * instead mark it as done and handle it later -
  1814. * if this request is going into BUBBLE state later
  1815. * it will automatically be re-applied. If this is not
  1816. * going into BUBBLE, signal fences later.
  1817. * Note - we will come here only if the last consumed
1818. * address matches with this port's buffer.
  1819. */
  1820. req_isp->deferred_fence_map_index[deferred_indx] = j;
  1821. req_isp->num_deferred_acks++;
  1822. CAM_DBG(CAM_ISP,
  1823. "ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
  1824. ctx->ctx_id, req->request_id, bubble_state,
  1825. req_isp->bubble_report);
  1826. CAM_DBG(CAM_ISP,
  1827. "ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
  1828. ctx->ctx_id, req_isp->num_deferred_acks, j,
  1829. req_isp->fence_map_out[j].resource_handle,
  1830. req_isp->fence_map_out[j].sync_id);
  1831. continue;
  1832. } else if (!req_isp->bubble_detected) {
  1833. CAM_DBG(CAM_ISP,
  1834. "Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
  1835. req->request_id,
  1836. req_isp->fence_map_out[j].resource_handle,
  1837. req_isp->fence_map_out[j].sync_id,
  1838. ctx->ctx_id);
  1839. rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
  1840. CAM_SYNC_STATE_SIGNALED_SUCCESS,
  1841. CAM_SYNC_COMMON_EVENT_SUCCESS);
  1842. if (rc) {
  1843. CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
  1844. req_isp->fence_map_out[j].sync_id, req->request_id, rc);
  1845. } else if (req_isp->num_deferred_acks) {
  1846. /* Process deferred buf_done acks */
  1847. __cam_isp_handle_deferred_buf_done(ctx_isp,
  1848. req, false,
  1849. CAM_SYNC_STATE_SIGNALED_SUCCESS,
  1850. CAM_SYNC_COMMON_EVENT_SUCCESS);
  1851. }
  1852. /* Reset fence */
  1853. req_isp->fence_map_out[j].sync_id = -1;
  1854. } else if (!req_isp->bubble_report) {
  1855. CAM_DBG(CAM_ISP,
  1856. "Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
  1857. req->request_id,
  1858. req_isp->fence_map_out[j].resource_handle,
  1859. req_isp->fence_map_out[j].sync_id,
  1860. ctx->ctx_id);
  1861. rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
  1862. CAM_SYNC_STATE_SIGNALED_ERROR,
  1863. CAM_SYNC_ISP_EVENT_BUBBLE);
  1864. if (rc) {
  1865. CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
  1866. req_isp->fence_map_out[j].sync_id, req->request_id, rc);
  1867. } else if (req_isp->num_deferred_acks) {
  1868. /* Process deferred buf_done acks */
  1869. __cam_isp_handle_deferred_buf_done(ctx_isp, req,
  1870. false,
  1871. CAM_SYNC_STATE_SIGNALED_ERROR,
  1872. CAM_SYNC_ISP_EVENT_BUBBLE);
  1873. }
  1874. /* Reset fence */
  1875. req_isp->fence_map_out[j].sync_id = -1;
  1876. } else {
  1877. /*
  1878. * Ignore the buffer done if bubble detect is on
  1879. * Increment the ack number here, and queue the
  1880. * request back to pending list whenever all the
  1881. * buffers are done.
  1882. */
  1883. req_isp->num_acked++;
  1884. CAM_DBG(CAM_ISP,
  1885. "buf done with bubble state %d recovery %d for req %lld, ctx %u",
  1886. bubble_state,
  1887. req_isp->bubble_report,
  1888. req->request_id,
  1889. ctx->ctx_id);
  1890. /* Process deferred buf_done acks */
  1891. if (req_isp->num_deferred_acks)
  1892. __cam_isp_handle_deferred_buf_done(ctx_isp, req,
  1893. true,
  1894. CAM_SYNC_STATE_SIGNALED_ERROR,
  1895. CAM_SYNC_ISP_EVENT_BUBBLE);
  1896. if (req_isp->num_acked == req_isp->num_fence_map_out) {
  1897. rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
  1898. if (rc)
  1899. CAM_ERR(CAM_ISP,
  1900. "Error in buf done for req = %llu with rc = %d",
  1901. req->request_id, rc);
  1902. return rc;
  1903. }
  1904. continue;
  1905. }
  1906. CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
  1907. req->request_id,
  1908. req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
  1909. if (!rc) {
  1910. req_isp->num_acked++;
  1911. }
  1912. if ((ctx_isp->use_frame_header_ts) &&
  1913. (req_isp->hw_update_data.frame_header_res_id ==
  1914. req_isp->fence_map_out[j].resource_handle))
  1915. __cam_isp_ctx_send_sof_timestamp_frame_header(
  1916. ctx_isp,
  1917. req_isp->hw_update_data.frame_header_cpu_addr,
  1918. req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  1919. }
  1920. check_deferred:
  1921. if ((unhandled_done.resource_handle > 0) && (!defer_buf_done))
  1922. __cam_isp_ctx_check_deferred_buf_done(
  1923. ctx_isp, &unhandled_done, bubble_state);
  1924. if (req_isp->num_acked > req_isp->num_fence_map_out) {
  1925. /* Should not happen */
  1926. CAM_ERR(CAM_ISP,
  1927. "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
  1928. req->request_id, req_isp->num_acked,
  1929. req_isp->num_fence_map_out, ctx->ctx_id);
  1930. }
  1931. if (req_isp->num_acked != req_isp->num_fence_map_out)
  1932. return rc;
  1933. rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
  1934. return rc;
  1935. }
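/*
 * __cam_isp_ctx_handle_buf_done()
 *
 * Entry point for buf done events: handle the event against the oldest
 * active request and, if some resources remain unhandled, retry against
 * the next request in the active list (at most one more, since the active
 * list holds a maximum of two requests) to absorb IRQ scheduling delays.
 */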
  1936. static int __cam_isp_ctx_handle_buf_done(
  1937. struct cam_isp_context *ctx_isp,
  1938. struct cam_isp_hw_done_event_data *done,
  1939. uint32_t bubble_state)
  1940. {
  1941. int rc = 0;
  1942. struct cam_ctx_request *req;
  1943. struct cam_context *ctx = ctx_isp->base;
  1944. struct cam_isp_hw_done_event_data done_next_req = {0};
  1945. if (list_empty(&ctx->active_req_list)) {
  1946. CAM_WARN(CAM_ISP, "Buf done with no active request");
  1947. return 0;
  1948. }
  1949. req = list_first_entry(&ctx->active_req_list,
  1950. struct cam_ctx_request, list);
  1951. rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
  1952. bubble_state, &done_next_req);
  1953. if (done_next_req.resource_handle) {
  1954. struct cam_isp_hw_done_event_data unhandled_res = {0};
  1955. struct cam_ctx_request *next_req = list_last_entry(
  1956. &ctx->active_req_list, struct cam_ctx_request, list);
  1957. if (next_req->request_id != req->request_id) {
  1958. /*
  1959. * Few resource handles are already signalled in the
  1960. * current request, lets check if there is another
  1961. * request waiting for these resources. This can
  1962. * happen if handling some of next request's buf done
  1963. * events are happening first before handling current
  1964. * request's remaining buf dones due to IRQ scheduling.
  1965. * Lets check only one more request as we will have
  1966. * maximum of 2 requests in active_list at any time.
  1967. */
  1968. CAM_WARN(CAM_ISP,
  1969. "Unhandled buf done resources for req %lld, trying next request %lld in active_list",
  1970. req->request_id, next_req->request_id);
  1971. __cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
  1972. next_req, &done_next_req,
  1973. bubble_state, &unhandled_res);
  1974. if (unhandled_res.resource_handle == 0)
  1975. CAM_INFO(CAM_ISP,
  1976. "BUF Done event handed for next request %lld",
  1977. next_req->request_id);
  1978. else
  1979. CAM_ERR(CAM_ISP,
  1980. "BUF Done not handled for next request %lld",
  1981. next_req->request_id);
  1982. } else {
  1983. CAM_WARN(CAM_ISP,
  1984. "Req %lld only active request, spurious buf_done rxd",
  1985. req->request_id);
  1986. }
  1987. }
  1988. return rc;
  1989. }
static void __cam_isp_ctx_buf_done_match_req(
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int i;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;
	uint32_t cmp_addr = 0;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
			req_isp->fence_map_out[i].image_buf_addr[0]) :
			req_isp->fence_map_out[i].image_buf_addr[0];
		if ((done->resource_handle ==
			req_isp->fence_map_out[i].resource_handle) &&
			(done->last_consumed_addr == cmp_addr)) {
			match_count++;
			break;
		}
	}

	*irq_delay_detected = (match_count > 0);

	CAM_DBG(CAM_ISP,
		"buf done resource_handle 0x%x match count %d for next req:%lld",
		done->resource_handle, match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}
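/*
 * Note on the match rule above: a buf done is attributed to the next
 * request only when BOTH the resource handle and the last consumed
 * address match one of its fence_map_out entries. In sketch form
 * (same macros as used above, 36-bit SMMU IOVA case):
 *
 *   cmp = cam_smmu_is_expanded_memory() ?
 *         CAM_36BIT_INTF_GET_IOVA_BASE(map[i].image_buf_addr[0]) :
 *         map[i].image_buf_addr[0];
 *   hit = (done->resource_handle == map[i].resource_handle) &&
 *         (done->last_consumed_addr == cmp);
 *
 * Matching on the handle alone would misfire whenever two consecutive
 * requests target the same output port, which is the common case.
 */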
static void __cam_isp_ctx_try_buf_done_process_for_active_request(
	uint32_t deferred_ack_start_idx, struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *deferred_req)
{
	int i, j, deferred_map_idx, rc;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *curr_active_req;
	struct cam_isp_ctx_req *curr_active_isp_req;
	struct cam_isp_ctx_req *deferred_isp_req;

	if (list_empty(&ctx->active_req_list))
		return;

	curr_active_req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);
	curr_active_isp_req = (struct cam_isp_ctx_req *)curr_active_req->req_priv;
	deferred_isp_req = (struct cam_isp_ctx_req *)deferred_req->req_priv;

	/* Check from newly updated deferred acks */
	for (i = deferred_ack_start_idx; i < deferred_isp_req->num_deferred_acks; i++) {
		deferred_map_idx = deferred_isp_req->deferred_fence_map_index[i];

		for (j = 0; j < curr_active_isp_req->num_fence_map_out; j++) {
			/* resource needs to match */
			if (curr_active_isp_req->fence_map_out[j].resource_handle !=
				deferred_isp_req->fence_map_out[deferred_map_idx].resource_handle)
				continue;

			/* Check if fence is valid */
			if (curr_active_isp_req->fence_map_out[j].sync_id == -1)
				break;

			CAM_WARN(CAM_ISP,
				"Processing delayed buf done req: %llu bubble_detected: %s res: 0x%x fd: 0x%x, ctx: %u [deferred req: %llu last applied: %llu]",
				curr_active_req->request_id,
				CAM_BOOL_TO_YESNO(curr_active_isp_req->bubble_detected),
				curr_active_isp_req->fence_map_out[j].resource_handle,
				curr_active_isp_req->fence_map_out[j].sync_id, ctx->ctx_id,
				deferred_req->request_id, ctx_isp->last_applied_req_id);

			/* Signal only if bubble is not detected for this request */
			if (!curr_active_isp_req->bubble_detected) {
				rc = cam_sync_signal(curr_active_isp_req->fence_map_out[j].sync_id,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
				if (rc)
					CAM_ERR(CAM_ISP,
						"Sync: %d for req: %llu failed with rc: %d",
						curr_active_isp_req->fence_map_out[j].sync_id,
						curr_active_req->request_id, rc);

				curr_active_isp_req->fence_map_out[j].sync_id = -1;
			}

			curr_active_isp_req->num_acked++;
			break;
		}
	}
}
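/*
 * Illustration of deferred_ack_start_idx (indices assumed for the
 * example): if the deferred request recorded acks at
 * deferred_fence_map_index = {0, 1} on an earlier pass and {2, 3} on
 * this one, the caller passes start idx 2 so only the two newly
 * deferred acks are replayed against the current active request.
 * Re-walking entries 0..1 would double-increment num_acked and could
 * double-signal a fence.
 */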
static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	uint32_t curr_num_deferred = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	bool req_in_pending_wait_list = false;

	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in wait list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify the consumed address for this request to make sure
		 * we are handling the buf_done for the correct buffer. Also
		 * defer the actual buf_done handling, i.e. do not signal the
		 * fence, as this request may go into Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	} else if (!list_empty(&ctx->pending_req_list)) {
		/*
		 * We have seen the case where hw config is blocked for some
		 * reason, and we then get the reg upd and buf done before the
		 * req is added to the wait req list.
		 */
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in pending list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify the consumed address for this request to make sure
		 * we are handling the buf_done for the correct buffer. Also
		 * defer the actual buf_done handling, i.e. do not signal the
		 * fence, as this request may go into Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	}

	if (!req_in_pending_wait_list && (ctx_isp->last_applied_req_id !=
		ctx_isp->last_bufdone_err_apply_req_id)) {
		CAM_DBG(CAM_ISP,
			"Buf done with no active request bubble_state=%d last_applied_req_id:%lld",
			bubble_state, ctx_isp->last_applied_req_id);
		ctx_isp->last_bufdone_err_apply_req_id =
			ctx_isp->last_applied_req_id;
	}

	return rc;
}
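/*
 * The wait-list and pending-list branches above are intentionally
 * symmetric. A hypothetical timeline for the pending-list case (the
 * "hw config blocked" race the comment describes; timestamps are
 * illustrative only):
 *
 *   t0: apply issued, hw_config() stalls -> req still on pending list
 *   t1: RUP and buf done IRQs fire for that req anyway
 *   t2: this function verifies the consumed address but defers the
 *       ack (defer_buf_done = true), so no fence is signalled before
 *       a possible bubble transition for this request
 */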
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		return __cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, done, bubble_state);
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If no irq delay is detected, verify the consumed address for
	 * the current req; otherwise the consumed address cannot be
	 * trusted for it.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Always verify the consumed address for the next req, since the
	 * reported buf done event may belong to the current req, in which
	 * case it must not be signalled for the next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}
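/*
 * Dispatch summary: targets that report the last consumed address take
 * the verify-addr path above, which can disambiguate delayed IRQs;
 * legacy targets fall back to plain first-entry matching. A minimal
 * caller sketch (out_res and iova stand in for values normally
 * produced by the hw mgr; only fields used above are shown):
 *
 *   struct cam_isp_hw_done_event_data done = {
 *           .resource_handle = out_res,
 *           .last_consumed_addr = iova,
 *   };
 *   __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, &done,
 *           0);   bubble_state = 0 means normal (non-bubble) handling
 */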
static int __cam_isp_ctx_apply_pending_req(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg = {0};

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if (ctx_isp->vfps_aux_context) {
		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED)
			goto end;

		if (ctx_isp->active_req_cnt >= 1)
			goto end;
	} else {
		if ((ctx->state != CAM_CTX_ACTIVATED) ||
			(!atomic_read(&ctx_isp->rxd_epoch)) ||
			(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
			goto end;

		if (ctx_isp->active_req_cnt >= 2)
			goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than the
	 * CDM processing returns, so set the substate before applying
	 * the settings.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot apply the configuration");
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}
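/*
 * Note the rollback pattern above: substate, last_applied_req_id, and
 * the request's list membership are all published under ctx->lock
 * BEFORE hw_config() is attempted, and every one of them is restored
 * on failure. Condensed:
 *
 *   spin_lock_bh(&ctx->lock);
 *   ... speculatively publish "applied" state, req -> wait list ...
 *   spin_unlock_bh(&ctx->lock);
 *   if (hw_config() fails) {
 *           spin_lock_bh(&ctx->lock);
 *           ... restore prior state, requeue req at pending head ...
 *           spin_unlock_bh(&ctx->lock);
 *   }
 *
 * Publishing first is required because offline SOF/REG_UPD can arrive
 * before hw_config() returns (see the comment in the function body).
 */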
static int __cam_isp_ctx_schedule_apply_req(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_pending_req;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}
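/*
 * Usage sketch: the offline epoch handler below calls
 * __cam_isp_ctx_schedule_apply_req(ctx_isp) from event (typically
 * tasklet) context, deferring the actual apply to the context workq at
 * CRM_TASK_PRIORITY_0. The worker then runs
 * __cam_isp_ctx_apply_pending_req(ctx_isp, NULL), where taking
 * spin_lock_bh() and waiting on hw_config() are acceptable.
 */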
static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	/*
	 * For offline it is not possible for an epoch to be generated
	 * without RUP done; IRQ scheduling delays can possibly cause this.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Active list empty on ctx: %u - EPOCH serviced before RUP",
			ctx->ctx_id);
	} else {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}
	}

	__cam_isp_ctx_schedule_apply_req(ctx_isp);

	/*
	 * If there is no valid request, wait for the RUP shutter posted
	 * after buf done.
	 */
	if (request_id)
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}
static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated),
			ctx_isp->frame_id);
	return 0;
}
static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and
	 * bubble-applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);
end:
	return rc;
}
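/*
 * List transitions driven by the RUP ack above, in brief:
 *
 *   wait_req_list -> active_req_list   (request has output fences)
 *   wait_req_list -> free_req_list     (num_fence_map_out == 0, i.e. a
 *                                       config-only request, done now)
 *
 * In both cases the substate advances APPLIED -> EPOCH, since the hw
 * has committed the new configuration at this frame boundary.
 */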
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %lld",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the sof signal. Note: due to scheduling
	 * delay we can run into the situation that two active requests
	 * are already in the active queue while we try to do the
	 * notification. In this case, skip the current notification to
	 * help the state machine catch up on the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		if (request_id != 0)
			ctx_isp->reported_req_id = request_id;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
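/*
 * Bubble watchdog above, summarized on an assumed timeline:
 *
 *   frame N   : bubble detected elsewhere, bubble_frame_cnt = 0
 *   frame N+1 : this handler runs, cnt < 1, so cnt -> 1 (keep waiting)
 *   frame N+2 : cnt >= 1, query CAM_ISP_HW_MGR_GET_LAST_CDM_DONE:
 *               - last_cdm_done >= req id: CDM ran, the buf done is
 *                 merely late, keep waiting
 *               - otherwise: CDM is stuck, requeue the req to pending
 *                 with cdm_reset_before_apply = true for a clean
 *                 re-apply
 *
 * The equal-SOF-timestamp check skips the whole thing when the tasklet
 * is so late that two epochs land on one frame.
 */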
static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;

	/* notify reqmgr with eof signal */
	rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}
static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	ctx_isp->last_sof_jiffies = jiffies;

	/* First check if there is a valid request in active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing is in the active list, the current request might not
	 * have moved from wait to active list yet. This can happen if the
	 * REG_UPDATE to sw comes immediately after SOF.
	 */
	if ((request_id == 0) && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u request %llu",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, request_id);

	return rc;
}
static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t wait_req_cnt = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no wait req at epoch, this is an error case.
		 * The recovery is to go back to the sof state.
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	if (ctx_isp->last_applied_jiffies >= ctx_isp->last_sof_jiffies) {
		list_for_each_entry(req, &ctx->wait_req_list, list) {
			wait_req_cnt++;
		}

		/*
		 * The previous req was applied after SOF and there is only
		 * one applied req; no bubble needs to be reported for this
		 * case.
		 */
		if (wait_req_cnt == 1) {
			req = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
			request_id = req->request_id;
			CAM_INFO(CAM_ISP,
				"ctx:%d Don't report the bubble for req:%lld",
				ctx->ctx_id, request_id);
			goto end;
		}
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;
	atomic_set(&ctx_isp->process_bubble, 1);

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to the active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	/*
	 * Update the event record before the req pointer is advanced
	 * to another (possibly invalid) req.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Get the req again from the active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (request_id == 0) {
		if (!list_empty(&ctx->active_req_list)) {
			req = list_last_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			__cam_isp_ctx_update_state_monitor_array(ctx_isp,
				CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
				req->request_id);
		}
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
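/*
 * End-to-end bubble flow for the applied state, condensed (request id
 * is an example value): an epoch without a preceding RUP ack for req
 * 10 marks it bubble_detected with reapply_type =
 * CAM_CONFIG_REAPPLY_IO, moves it wait -> active, and, when
 * bubble_report is set, notifies CRM with CRM_KMD_ERR_BUBBLE so req 10
 * is re-sent. The SOF timestamp is still reported, with
 * CAM_REQ_MGR_SOF_EVENT_ERROR when the reported req is the bubble one.
 */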
static int __cam_isp_ctx_buf_done_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}
static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	ctx_isp->last_sof_jiffies = jiffies;

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}
static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * This means we missed the reg upd ack. So we need to
	 * transition to BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no wait req at epoch, this is an error case.
		 * Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to the active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	if (!req_isp->bubble_detected) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
			list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		req_isp->bubble_detected = true;
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
	}

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}
static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
static void __cam_isp_get_notification_evt_params(
	uint32_t hw_error, uint32_t *fence_evt_cause,
	uint32_t *req_mgr_err_code, uint32_t *recovery_type)
{
	uint32_t err_type, err_code = 0, recovery_type_temp;

	err_type = CAM_SYNC_ISP_EVENT_UNKNOWN;
	recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;

	if (hw_error & CAM_ISP_HW_ERROR_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_OUTPUT_FIFO_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_RECOVERY_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_P2I_ERROR) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_P2I_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_VIOLATION) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_VIOLATION;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR) {
		err_code |= CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING;
		err_type = CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME) {
		err_code |= CAM_REQ_MGR_CSID_UNBOUNDED_FRAME;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_FRAME_SIZE) {
		err_code |= CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_EOT) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_EOT;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (recovery_type_temp == (CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY |
		CAM_REQ_MGR_ERROR_TYPE_RECOVERY))
		recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;

	if (!err_code)
		err_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR;

	*req_mgr_err_code = err_code;
	*fence_evt_cause = err_type;
	*recovery_type = recovery_type_temp;
}
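/*
 * Worked example (flag combination chosen for illustration): for
 * hw_error = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE |
 *            CAM_ISP_HW_ERROR_CSID_MISSING_EOT
 * the walk above yields
 *
 *   req_mgr_err_code = CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH |
 *                      CAM_REQ_MGR_CSID_MISSING_EOT
 *   recovery_type    = CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY
 *
 * since full recovery wins whenever both recovery bits end up set,
 * while fence_evt_cause keeps only the last matching err_type
 * (CAM_SYNC_ISP_EVENT_CSID_RX_ERROR here).
 */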
static bool __cam_isp_ctx_request_can_reapply(
	struct cam_isp_ctx_req *req_isp)
{
	int i;

	for (i = 0; i < req_isp->num_fence_map_out; i++)
		if (req_isp->fence_map_out[i].sync_id == -1)
			return false;

	return true;
}
static int __cam_isp_ctx_validate_for_req_reapply_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_ctx_request *req_temp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Check for req in active/wait lists */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"Active request list empty for ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);

		if (list_empty(&ctx->wait_req_list)) {
			CAM_WARN(CAM_ISP,
				"No active/wait req for ctx: %u on link: 0x%x start from pending",
				ctx->ctx_id, ctx->link_hdl);
			rc = 0;
			goto end;
		}
	}

	/* Validate that no fences for active requests have been signaled */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			/*
			 * If some fences of the active request are already
			 * signaled, we should not do recovery, to preserve
			 * buffer and timestamp consistency.
			 */
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			if (!__cam_isp_ctx_request_can_reapply(req_isp)) {
				CAM_WARN(CAM_ISP,
					"Req: %llu in ctx:%u on link: 0x%x fences have partially signaled, cannot do recovery",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	/* Move active requests to pending list */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			ctx_isp->active_req_cnt--;
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move active req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}

	/* Move wait requests to pending list */
	if (!list_empty(&ctx->wait_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp, &ctx->wait_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move wait req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}

end:
	return rc;
}
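/*
 * Recovery requeue example (request ids assumed): with active = {8, 9}
 * and wait = {10}, the reverse walks above feed 9, then 8, then 10
 * into __cam_isp_ctx_enqueue_request_in_order(), which (as its name
 * suggests) keeps the pending list ordered by request id, so pending
 * ends up as {8, 9, 10}. The partial-signal check before that ensures
 * no request is re-applied after some of its fences were already
 * signalled, which would break buffer/timestamp consistency.
 */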
static int __cam_isp_ctx_handle_recovery_req_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req_to_reapply = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_WARN(CAM_ISP,
			"No pending request to recover from on ctx: %u", ctx->ctx_id);
		return -EINVAL;
	}

	req_to_reapply = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *)req_to_reapply->req_priv;
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	ctx_isp->recovery_req_id = req_to_reapply->request_id;
	atomic_set(&ctx_isp->internal_recovery_set, 1);

	CAM_INFO(CAM_ISP, "Notify CRM to reapply req:%llu for ctx:%u link:0x%x",
		req_to_reapply->request_id, ctx->ctx_id, ctx->link_hdl);

	rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
		CRM_KMD_WARN_INTERNAL_RECOVERY, req_to_reapply->request_id,
		ctx_isp);
	if (rc) {
		/* Unable to notify CRM to do reapply; fall back to normal flow */
		CAM_WARN(CAM_ISP,
			"ctx:%u unable to notify CRM for req %llu",
			ctx->ctx_id, ctx_isp->recovery_req_id);
		ctx_isp->recovery_req_id = 0;
		atomic_set(&ctx_isp->internal_recovery_set, 0);
	}

	return rc;
}
static int __cam_isp_ctx_trigger_error_req_reapply(
	uint32_t err_type, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;

	if ((err_type & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) &&
		(isp_ctx_debug.disable_internal_recovery_mask &
		CAM_ISP_CTX_DISABLE_RECOVERY_BUS_OVERFLOW))
		return -EINVAL;

	/*
	 * For errors that are recoverable within KMD, we try to do an
	 * internal hw stop and restart, and notify CRM to do a reapply
	 * with the help of the bubble control flow.
	 */
	rc = __cam_isp_ctx_validate_for_req_reapply_util(ctx_isp);
	if (rc)
		goto end;

	rc = __cam_isp_ctx_handle_recovery_req_util(ctx_isp);
	if (rc)
		goto end;

	CAM_DBG(CAM_ISP, "Triggered internal recovery for req:%llu ctx:%u on link 0x%x",
		ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	enum cam_req_mgr_device_error error;
	uint32_t i = 0;
	bool found = false;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_to_report = NULL;
	struct cam_ctx_request *req_to_dump = NULL;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_ctx_req *req_isp_to_report = NULL;
	uint64_t error_request_id;
	struct cam_hw_fence_map_entry *fence_map_out = NULL;
	uint32_t recovery_type, fence_evt_cause;
	uint32_t req_mgr_err_code;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;

	CAM_DBG(CAM_ISP, "Enter HW error_type = %d", error_event_data->error_type);

	if (error_event_data->try_internal_recovery) {
		rc = __cam_isp_ctx_trigger_error_req_reapply(error_event_data->error_type, ctx_isp);
		if (!rc)
			goto exit;
	}

	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	__cam_isp_get_notification_evt_params(error_event_data->error_type,
		&fence_evt_cause, &req_mgr_err_code, &recovery_type);

	/*
	 * The error is likely caused by the first request on the active
	 * list. If the active list is empty, check the wait list (the error
	 * may have hit as soon as RUP and is being handled before RUP).
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"handling error with no active request");
		if (list_empty(&ctx->wait_req_list)) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Error with no active/wait request");
			goto end;
		} else {
			req_to_dump = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
		}
	} else {
		req_to_dump = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	}

	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;

	if (error_event_data->enable_req_dump)
		rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			ctx_isp->active_req_cnt--;
		} else {
			found = true;
			break;
		}
	}

	if (found)
		goto move_to_pending;

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			found = true;
			break;
		}
	}

move_to_pending:
	/*
	 * If bubble recovery is enabled on any request, we need to move that
	 * request and all the subsequent requests to the pending list.
	 * Note:
	 * We need to traverse the active list in reverse order and add
	 * to the head of the pending list.
	 * e.g. pending current state: 10, 11 | active current state: 8, 9
	 * intermittent for loop iteration - pending: 9, 10, 11 | active: 8
	 * final state - pending: 8, 9, 10, 11 | active: NULL
	 */
	if (found) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->active_req_cnt--;
		}
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->wait_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
		}
	}

end:
	do {
		if (list_empty(&ctx->pending_req_list)) {
			error_request_id = ctx_isp->last_applied_req_id;
			req_isp = NULL;
			break;
		}
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		error_request_id = ctx_isp->last_applied_req_id;

		if (req_isp->bubble_report) {
			req_to_report = req;
			req_isp_to_report = req_to_report->req_priv;
			break;
		}

		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					fence_evt_cause);
			req_isp->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	error = CRM_KMD_ERR_FATAL;
	if (req_isp_to_report && req_isp_to_report->bubble_report)
		if (error_event_data->recovery_enabled)
			error = CRM_KMD_ERR_BUBBLE;

	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error,
		error_request_id, ctx_isp);

	/*
	 * Notify UMD that an error occurred in KMD; this helps UMD take
	 * the necessary action and dump the relevant info.
	 */
	if (error == CRM_KMD_ERR_FATAL)
		__cam_isp_ctx_notify_v4l2_error_event(recovery_type,
			req_mgr_err_code, error_request_id, ctx);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;

	CAM_DBG(CAM_ISP, "Handling error done on ctx: %u", ctx->ctx_id);

exit:
	return rc;
}
static int __cam_isp_ctx_fs2_sof_in_sof_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (!(list_empty(&ctx->wait_req_list)))
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

end:
	return rc;
}
static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
    void *evt_data)
{
    int rc = 0;
    struct cam_isp_hw_done_event_data *done =
        (struct cam_isp_hw_done_event_data *) evt_data;
    struct cam_context *ctx = ctx_isp->base;
    int prev_active_req_cnt = 0;
    int curr_req_id = 0;
    struct cam_ctx_request *req;

    prev_active_req_cnt = ctx_isp->active_req_cnt;
    /*
     * Use list_first_entry_or_null here: list_first_entry() never
     * returns NULL, so the original "if (req)" guard was ineffective
     * when the active list was empty.
     */
    req = list_first_entry_or_null(&ctx->active_req_list,
        struct cam_ctx_request, list);
    if (req)
        curr_req_id = req->request_id;

    rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);

    /* Exactly one request completed with this buf done */
    if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
        if (list_empty(&ctx->wait_req_list) &&
            list_empty(&ctx->active_req_list)) {
            CAM_DBG(CAM_ISP, "No request, move to SOF");
            ctx_isp->substate_activated =
                CAM_ISP_CTX_ACTIVATED_SOF;
            if (ctx_isp->reported_req_id < curr_req_id) {
                ctx_isp->reported_req_id = curr_req_id;
                __cam_isp_ctx_send_sof_timestamp(ctx_isp,
                    curr_req_id,
                    CAM_REQ_MGR_SOF_EVENT_SUCCESS);
            }
        }
    }
    return rc;
}
static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
    void *evt_data)
{
    int rc = 0;

    rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
    return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_applied(
    struct cam_isp_context *ctx_isp,
    void *evt_data)
{
    int rc = 0;

    rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
    return rc;
}
static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
    void *evt_data)
{
    int rc = 0;
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp;
    struct cam_context *ctx = ctx_isp->base;

    if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
        CAM_DBG(CAM_ISP, "invalid RUP");
        goto end;
    }

    /*
     * This is for the first update. The initial setting will
     * cause the reg_upd in the first frame.
     */
    if (!list_empty(&ctx->wait_req_list)) {
        req = list_first_entry(&ctx->wait_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        if (req_isp->num_fence_map_out == req_isp->num_acked)
            list_add_tail(&req->list, &ctx->free_req_list);
        else
            CAM_ERR(CAM_ISP,
                "receive rup in unexpected state");
    }
    if (req != NULL) {
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
            req->request_id);
    }
end:
    return rc;
}
static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
    struct cam_isp_context *ctx_isp, void *evt_data)
{
    int rc = 0;
    struct cam_ctx_request *req = NULL;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_ctx_req *req_isp;
    uint64_t request_id = 0;

    if (list_empty(&ctx->wait_req_list)) {
        CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
        goto end;
    }
    req = list_first_entry(&ctx->wait_req_list,
        struct cam_ctx_request, list);
    list_del_init(&req->list);

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;
    if (req_isp->num_fence_map_out != 0) {
        list_add_tail(&req->list, &ctx->active_req_list);
        ctx_isp->active_req_cnt++;
        CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
            req->request_id, ctx_isp->active_req_cnt);
    } else {
        /* no io config, so the request is completed. */
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    /*
     * This function is only called directly from the applied and
     * bubble applied states, so change the substate here.
     */
    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
    if (req_isp->num_fence_map_out != 1)
        goto end;

    if (ctx_isp->active_req_cnt <= 2) {
        list_for_each_entry(req, &ctx->active_req_list, list) {
            if (req->request_id > ctx_isp->reported_req_id) {
                request_id = req->request_id;
                ctx_isp->reported_req_id = request_id;
                break;
            }
        }

        __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
            CAM_REQ_MGR_SOF_EVENT_SUCCESS);

        __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
    }

    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated));
end:
    if (req != NULL && !rc) {
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
            req->request_id);
    }
    return rc;
}
static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
    struct cam_isp_context *ctx_isp)
{
    struct cam_context *ctx = ctx_isp->base;

    if ((++ctx_isp->aeb_error_cnt) <= CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX) {
        CAM_WARN(CAM_ISP,
            "AEB slave RDI's current request's SOF seen after next req is applied for ctx: %u on link: 0x%x last_applied_req: %llu err_cnt: %u",
            ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, ctx_isp->aeb_error_cnt);
        return;
    }

    CAM_ERR(CAM_ISP,
        "Fatal - AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height needs to be re-configured for ctx: %u on link: 0x%x err_cnt: %u",
        ctx->ctx_id, ctx->link_hdl, ctx_isp->aeb_error_cnt);

    /* Pause CRM timer */
    if (!ctx_isp->offline_context)
        __cam_isp_ctx_pause_crm_timer(ctx);

    /* Trigger reg dump */
    __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

    /* Notify CRM on fatal error */
    __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
        ctx_isp->last_applied_req_id, ctx_isp);

    /* Notify userland on error */
    __cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
        CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);

    /* Change state to HALT, stop further processing of HW events */
    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
}
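
/*
 * Internal recovery reuses the bubble machinery: the offending request
 * is reported to CRM as a bubble (or parked on the pending list until
 * outstanding buf dones drain), and further apply calls are blocked
 * via internal_recovery_set/process_bubble until recovery completes.
 */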
static int __cam_isp_ctx_trigger_internal_recovery(
    bool sync_frame_drop, struct cam_isp_context *ctx_isp)
{
    int rc = 0;
    bool do_recovery = true;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp = NULL;

    if (list_empty(&ctx->wait_req_list)) {
        /*
         * If the wait list is empty and we encounter a "silent" frame
         * drop, the settings applied on the previous frame did not take
         * effect at the next frame boundary; they are expected to latch
         * a frame later, so there is no need to recover. If it's an
         * out-of-sync drop, use the pending request instead.
         */
        if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
            req = list_first_entry(&ctx->pending_req_list,
                struct cam_ctx_request, list);
        else
            do_recovery = false;
    }

    /* If both wait and pending list have no request to recover on */
    if (!do_recovery) {
        CAM_WARN(CAM_ISP,
            "No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
            ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
            ctx_isp->req_info.last_bufdone_req_id);
        goto end;
    }

    if (!req) {
        req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
        if (req->request_id != ctx_isp->last_applied_req_id)
            CAM_WARN(CAM_ISP,
                "Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
                req->request_id, ctx_isp->last_applied_req_id,
                ctx->ctx_id, ctx->link_hdl);
    }

    req_isp = (struct cam_isp_ctx_req *)req->req_priv;

    /*
     * Treat this as a bubble; after recovery, re-start from the
     * appropriate sub-state. This will block servicing any further
     * apply calls from CRM.
     */
    atomic_set(&ctx_isp->internal_recovery_set, 1);
    atomic_set(&ctx_isp->process_bubble, 1);
    ctx_isp->recovery_req_id = req->request_id;

    /* Wait for active requests to finish before issuing recovery */
    if (ctx_isp->active_req_cnt) {
        req_isp->bubble_detected = true;
        CAM_WARN(CAM_ISP,
            "Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
            ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
            ctx->ctx_id, ctx->link_hdl);
    } else {
        rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
            ctx_isp->recovery_req_id, ctx_isp);
        if (rc) {
            /* Unable to do bubble recovery, reset back to normal */
            CAM_WARN(CAM_ISP,
                "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
                ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
            __cam_isp_context_reset_internal_recovery_params(ctx_isp);
            goto end;
        }

        ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
        list_del_init(&req->list);
        list_add(&req->list, &ctx->pending_req_list);
    }

end:
    return rc;
}
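
/*
 * Secondary (CSID) events and their handling for AEB, per the switch
 * below:
 *   SEC_EVENT_SOF          - slave RDI SOF seen while APPLIED or
 *                            BUBBLE_APPLIED: escalate via the AEB
 *                            error notifier above.
 *   SEC_EVENT_EPOCH        - CSID CAMIF EPOCH seen while APPLIED or
 *                            BUBBLE_APPLIED: treat as a programming
 *                            delay frame drop and recover.
 *   OUT_OF_SYNC_FRAME_DROP - sensor-sync (vc mismatch) drop: recover,
 *                            except at stream on, where recovery
 *                            would loop.
 */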
static int __cam_isp_ctx_handle_secondary_events(
    struct cam_isp_context *ctx_isp, void *evt_data)
{
    int rc = 0;
    bool recover = false, sync_frame_drop = false;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_hw_secondary_event_data *sec_evt_data =
        (struct cam_isp_hw_secondary_event_data *)evt_data;

    /* Current scheme handles secondary events only for custom AEB */
    if (!ctx_isp->aeb_enabled) {
        CAM_WARN(CAM_ISP,
            "Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
            ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
        goto end;
    }

    if (atomic_read(&ctx_isp->internal_recovery_set)) {
        CAM_WARN(CAM_ISP,
            "Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
            ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
        goto end;
    }

    /*
     * In case of custom AEB, ensure the first exposure frame has not
     * moved forward with its settings without the second/third
     * exposure frame coming in. Also track for bubble: in case of
     * system delays it's possible for the IFE settings to not be
     * written to HW on a given frame. If any of these scenarios
     * occur, flag an error and recover.
     */
    switch (sec_evt_data->evt_type) {
    case CAM_ISP_HW_SEC_EVENT_SOF:
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
            ctx_isp->last_applied_req_id);

        /* Slave RDI's frame starting post IFE EPOCH - Fatal */
        if ((ctx_isp->substate_activated ==
            CAM_ISP_CTX_ACTIVATED_APPLIED) ||
            (ctx_isp->substate_activated ==
            CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
            __cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
        else
            /* Reset error count */
            ctx_isp->aeb_error_cnt = 0;
        break;
    case CAM_ISP_HW_SEC_EVENT_EPOCH:
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
            ctx_isp->last_applied_req_id);

        /*
         * Master RDI frame dropped in CSID: due to a programming delay
         * there is no RUP/AUP. On such occasions use CSID CAMIF EPOCH
         * for bubble detection, flag on detection and perform the
         * necessary bubble recovery.
         */
        if ((ctx_isp->substate_activated ==
            CAM_ISP_CTX_ACTIVATED_APPLIED) ||
            (ctx_isp->substate_activated ==
            CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
            recover = true;
            CAM_WARN(CAM_ISP,
                "Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
                ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
        }
        break;
    case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
            ctx_isp->last_applied_req_id);

        /* Avoid recovery loop if frame is dropped at stream on */
        if (!ctx_isp->frame_id) {
            CAM_ERR(CAM_ISP,
                "Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
                ctx->ctx_id, ctx->link_hdl,
                ctx_isp->frame_id, ctx_isp->last_applied_req_id);
            rc = -EPERM;
            break;
        }

        recover = true;
        sync_frame_drop = true;
        CAM_WARN(CAM_ISP,
            "Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
            ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
        break;
    default:
        break;
    }

    if (recover &&
        !(isp_ctx_debug.disable_internal_recovery_mask & CAM_ISP_CTX_DISABLE_RECOVERY_AEB))
        rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);

end:
    return rc;
}
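
/*
 * Per-substate IRQ dispatch tables. Each row corresponds to a
 * CAM_ISP_CTX_ACTIVATED_* substate; judging by the handler names, the
 * irq_ops slots appear to be indexed by HW event type in the order:
 * error, SOF, reg update, EPOCH, EOF, buf done, secondary event.
 * Rows left empty (e.g. HALT) simply ignore all events.
 */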
static struct cam_isp_ctx_irq_ops
    cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_sof,
            __cam_isp_ctx_notify_sof_in_activated_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_sof,
            __cam_isp_ctx_handle_secondary_events,
        },
    },
    /* APPLIED */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_applied_state,
            __cam_isp_ctx_epoch_in_applied,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_applied,
            __cam_isp_ctx_handle_secondary_events,
        },
    },
    /* EPOCH */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_epoch,
            __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
            __cam_isp_ctx_notify_sof_in_activated_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_epoch,
            __cam_isp_ctx_handle_secondary_events,
        },
    },
    /* BUBBLE */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
            __cam_isp_ctx_notify_sof_in_activated_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_bubble,
            __cam_isp_ctx_handle_secondary_events,
        },
    },
    /* Bubble Applied */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_applied_state,
            __cam_isp_ctx_epoch_in_bubble_applied,
            NULL,
            __cam_isp_ctx_buf_done_in_bubble_applied,
            __cam_isp_ctx_handle_secondary_events,
        },
    },
    /* HW ERROR */
    {
        .irq_ops = {
            NULL,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_hw_error,
            NULL,
            NULL,
            NULL,
        },
    },
    /* HALT */
    {
    },
};
static struct cam_isp_ctx_irq_ops
    cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_fs2_sof_in_sof_state,
            __cam_isp_ctx_fs2_reg_upd_in_sof,
            __cam_isp_ctx_fs2_sof_in_sof_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            NULL,
        },
    },
    /* APPLIED */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_fs2_reg_upd_in_applied_state,
            __cam_isp_ctx_epoch_in_applied,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_fs2_buf_done_in_applied,
        },
    },
    /* EPOCH */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_epoch,
            __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
            __cam_isp_ctx_notify_sof_in_activated_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_fs2_buf_done_in_epoch,
        },
    },
    /* BUBBLE */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
            __cam_isp_ctx_notify_sof_in_activated_state,
            __cam_isp_ctx_notify_eof_in_activated_state,
            __cam_isp_ctx_buf_done_in_bubble,
        },
    },
    /* Bubble Applied */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_applied_state,
            __cam_isp_ctx_epoch_in_bubble_applied,
            NULL,
            __cam_isp_ctx_buf_done_in_bubble_applied,
        },
    },
    /* HW ERROR */
    {
        .irq_ops = {
            NULL,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_hw_error,
            NULL,
            NULL,
            NULL,
        },
    },
    /* HALT */
    {
    },
};
static struct cam_isp_ctx_irq_ops
    cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            NULL,
            NULL,
            NULL,
            NULL,
            NULL,
        },
    },
    /* APPLIED */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_applied_state,
            __cam_isp_ctx_offline_epoch_in_activated_state,
            NULL,
            __cam_isp_ctx_buf_done_in_applied,
        },
    },
    /* EPOCH */
    {
        .irq_ops = {
            __cam_isp_ctx_handle_error,
            __cam_isp_ctx_sof_in_activated_state,
            NULL,
            __cam_isp_ctx_offline_epoch_in_activated_state,
            NULL,
            __cam_isp_ctx_buf_done_in_epoch,
        },
    },
    /* BUBBLE */
    {
    },
    /* Bubble Applied */
    {
    },
    /* HW ERROR */
    {
        .irq_ops = {
            NULL,
            __cam_isp_ctx_sof_in_activated_state,
            __cam_isp_ctx_reg_upd_in_hw_error,
            NULL,
            NULL,
            NULL,
        },
    },
    /* HALT */
    {
    },
};
static inline int cam_isp_context_apply_evt_injection(struct cam_context *ctx)
{
    struct cam_isp_context *ctx_isp = ctx->ctx_priv;
    struct cam_hw_inject_evt_param *evt_inject_params = &ctx_isp->evt_inject_params;
    struct cam_common_evt_inject_data inject_evt = {0};
    int rc;

    inject_evt.evt_params = evt_inject_params;
    rc = cam_context_apply_evt_injection(ctx, &inject_evt);
    if (rc)
        CAM_ERR(CAM_ISP, "Failed to apply event injection ctx_id: %u req_id: %u",
            ctx->ctx_id, evt_inject_params->req_id);

    evt_inject_params->is_valid = false;

    return rc;
}
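
/*
 * Core apply path: validates that the request being applied is at the
 * head of the pending list, rejects applies during bubble/internal
 * recovery or congestion (>= 2 requests active), then submits the hw
 * update entries via hw_config and moves the request to the wait list
 * on success.
 */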
static int __cam_isp_ctx_apply_req_in_activated_state(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
    enum cam_isp_ctx_activated_substate next_state)
{
    int rc = 0;
    struct cam_ctx_request *req;
    struct cam_ctx_request *active_req = NULL;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_ctx_req *active_req_isp;
    struct cam_isp_context *ctx_isp = NULL;
    struct cam_hw_config_args cfg = {0};

    ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

    if (apply->re_apply)
        if (apply->request_id <= ctx_isp->last_applied_req_id) {
            CAM_INFO_RATE_LIMIT(CAM_ISP,
                "ctx_id:%d Trying to reapply the same request %llu again",
                ctx->ctx_id,
                apply->request_id);
            return 0;
        }

    if (list_empty(&ctx->pending_req_list)) {
        CAM_ERR_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d No available request for Apply id %lld",
            ctx->ctx_id,
            apply->request_id);
        rc = -EFAULT;
        goto end;
    }

    /*
     * When the pipeline has an issue, requests can queue up in it.
     * In this case, reject the additional request. The maximum
     * number of requests allowed to be outstanding is 2.
     */
    if (atomic_read(&ctx_isp->process_bubble)) {
        CAM_INFO_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d Processing bubble cannot apply Request Id %llu",
            ctx->ctx_id,
            apply->request_id);
        rc = -EFAULT;
        goto end;
    }

    /*
     * While the ISP is processing internal recovery, CRM may still
     * apply a request to the ISP context. In this case, reject the
     * apply.
     */
    if (atomic_read(&ctx_isp->internal_recovery_set)) {
        CAM_INFO_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d Processing recovery cannot apply Request Id %lld",
            ctx->ctx_id,
            apply->request_id);
        rc = -EAGAIN;
        goto end;
    }

    spin_lock_bh(&ctx->lock);
    req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
        list);
    spin_unlock_bh(&ctx->lock);

    /*
     * Check whether the request id matches the head of the pending
     * list; if not, we are in the middle of error handling and this
     * apply must be rejected.
     */
    if (req->request_id != apply->request_id) {
        CAM_ERR_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d Invalid Request Id asking %llu existing %llu",
            ctx->ctx_id,
            apply->request_id, req->request_id);
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
        req->request_id,
        __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
        ctx->ctx_id);
    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    if (ctx_isp->active_req_cnt >= 2) {
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
            req->request_id,
            ctx_isp->active_req_cnt,
            ctx->ctx_id);

        spin_lock_bh(&ctx->lock);
        if (!list_empty(&ctx->active_req_list))
            active_req = list_first_entry(&ctx->active_req_list,
                struct cam_ctx_request, list);
        else
            CAM_ERR_RATE_LIMIT(CAM_ISP,
                "WARNING: should not happen (cnt = %d) but active_list empty",
                ctx_isp->active_req_cnt);
        spin_unlock_bh(&ctx->lock);

        if (active_req) {
            active_req_isp =
                (struct cam_isp_ctx_req *) active_req->req_priv;
            __cam_isp_ctx_handle_buf_done_fail_log(ctx_isp,
                active_req->request_id, active_req_isp);
        }

        rc = -EFAULT;
        goto end;
    }

    req_isp->bubble_report = apply->report_if_bubble;

    /*
     * Reset all buf done/bubble flags for the req being applied.
     * If internal recovery has led to re-apply of the same request,
     * clear all stale entities.
     */
    req_isp->num_acked = 0;
    req_isp->num_deferred_acks = 0;
    req_isp->cdm_reset_before_apply = false;
    req_isp->bubble_detected = false;

    cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
    cfg.request_id = req->request_id;
    cfg.hw_update_entries = req_isp->cfg;
    cfg.num_hw_update_entries = req_isp->num_cfg;
    cfg.priv = &req_isp->hw_update_data;
    cfg.init_packet = 0;
    cfg.reapply_type = req_isp->reapply_type;
    cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

    if ((ctx_isp->evt_inject_params.is_valid) &&
        (req->request_id == ctx_isp->evt_inject_params.req_id)) {
        rc = cam_isp_context_apply_evt_injection(ctx_isp->base);
        if (!rc)
            goto end;
    }

    atomic_set(&ctx_isp->apply_in_progress, 1);

    rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
    if (!rc) {
        spin_lock_bh(&ctx->lock);
        ctx_isp->substate_activated = next_state;
        ctx_isp->last_applied_req_id = apply->request_id;
        ctx_isp->last_applied_jiffies = jiffies;
        list_del_init(&req->list);
        if (atomic_read(&ctx_isp->internal_recovery_set))
            __cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
        else
            list_add_tail(&req->list, &ctx->wait_req_list);
        CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
            __cam_isp_ctx_substate_val_to_type(next_state),
            ctx_isp->last_applied_req_id);
        spin_unlock_bh(&ctx->lock);

        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
            req->request_id);
        __cam_isp_ctx_update_event_record(ctx_isp,
            CAM_ISP_CTX_EVENT_APPLY, req);
    } else if (rc == -EALREADY) {
        spin_lock_bh(&ctx->lock);
        req_isp->bubble_detected = true;
        req_isp->cdm_reset_before_apply = false;
        atomic_set(&ctx_isp->process_bubble, 1);
        list_del_init(&req->list);
        list_add(&req->list, &ctx->active_req_list);
        ctx_isp->active_req_cnt++;
        spin_unlock_bh(&ctx->lock);
        CAM_DBG(CAM_REQ,
            "move request %lld to active list(cnt = %d), ctx %u",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
    } else {
        CAM_ERR_RATE_LIMIT(CAM_ISP,
            "ctx_id:%d Cannot apply (req %lld) the configuration, rc %d",
            ctx->ctx_id, apply->request_id, rc);
    }

    atomic_set(&ctx_isp->apply_in_progress, 0);
end:
    return rc;
}
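
/*
 * Thin per-substate wrappers: applies issued from SOF and EPOCH land
 * in APPLIED, while an apply issued from BUBBLE lands in
 * BUBBLE_APPLIED so the reg update/epoch handlers can tell the two
 * flows apart.
 */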
static int __cam_isp_ctx_apply_req_in_sof(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "current Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
        CAM_ISP_CTX_ACTIVATED_APPLIED);
    CAM_DBG(CAM_ISP, "new Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (rc)
        CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated), rc);

    return rc;
}

static int __cam_isp_ctx_apply_req_in_epoch(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "current Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
        CAM_ISP_CTX_ACTIVATED_APPLIED);
    CAM_DBG(CAM_ISP, "new Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (rc)
        CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated), rc);

    return rc;
}

static int __cam_isp_ctx_apply_req_in_bubble(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "current Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
        CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
    CAM_DBG(CAM_ISP, "new Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (rc)
        CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated), rc);

    return rc;
}
static int __cam_isp_ctx_apply_default_req_settings(
    struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_isp_context *isp_ctx =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;

    hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type =
        CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc)
        CAM_ERR(CAM_ISP,
            "Failed to apply default settings rc %d", rc);
    else
        CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);

    return rc;
}
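
/*
 * The user-dump helpers below are callbacks for
 * cam_common_user_dump_helper(): each receives a scratch write address
 * and serializes its payload as consecutive words, returning the
 * advanced write pointer.
 */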
static void *cam_isp_ctx_user_dump_req_list(
    void *dump_struct, uint8_t *addr_ptr)
{
    struct list_head *head = NULL;
    uint64_t *addr;
    struct cam_ctx_request *req, *req_temp;

    head = (struct list_head *)dump_struct;
    addr = (uint64_t *)addr_ptr;

    if (!list_empty(head)) {
        list_for_each_entry_safe(req, req_temp, head, list) {
            *addr++ = req->request_id;
        }
    }

    return addr;
}

static void *cam_isp_ctx_user_dump_active_requests(
    void *dump_struct, uint8_t *addr_ptr)
{
    uint64_t *addr;
    struct cam_ctx_request *req;

    req = (struct cam_ctx_request *)dump_struct;
    addr = (uint64_t *)addr_ptr;
    *addr++ = req->request_id;

    return addr;
}
static int __cam_isp_ctx_dump_req_info(
    struct cam_context *ctx,
    struct cam_ctx_request *req,
    struct cam_common_hw_dump_args *dump_args)
{
    int i, rc = 0;
    uint32_t min_len;
    size_t remain_len;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_context *ctx_isp;
    struct cam_ctx_request *req_temp;

    if (!req || !ctx || !dump_args) {
        CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK",
            req, ctx, dump_args);
        return -EINVAL;
    }
    req_isp = (struct cam_isp_ctx_req *)req->req_priv;
    ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;

    if (dump_args->buf_len <= dump_args->offset) {
        CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
            dump_args->buf_len, dump_args->offset);
        return -ENOSPC;
    }

    remain_len = dump_args->buf_len - dump_args->offset;
    min_len = sizeof(struct cam_isp_context_dump_header) +
        (CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
         req_isp->num_fence_map_out *
         sizeof(uint64_t));

    if (remain_len < min_len) {
        CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
            remain_len, min_len);
        return -ENOSPC;
    }

    /* Dump pending request list */
    rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
        &ctx->pending_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_PENDING_REQUESTS:");
    if (rc) {
        CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Pending request dump failed, rc: %d",
            rc);
        return rc;
    }

    /* Dump applied request list */
    rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
        &ctx->wait_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_APPLIED_REQUESTS:");
    if (rc) {
        CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Applied request dump failed, rc: %d",
            rc);
        return rc;
    }

    /* Dump active request list */
    rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
        &ctx->active_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_ACTIVE_REQUESTS:");
    if (rc) {
        CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Active request dump failed, rc: %d",
            rc);
        return rc;
    }

    /* Dump active request fences */
    if (!list_empty(&ctx->active_req_list)) {
        list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
            req_isp = (struct cam_isp_ctx_req *)req->req_priv;
            for (i = 0; i < req_isp->num_fence_map_out; i++) {
                rc = cam_common_user_dump_helper(dump_args,
                    cam_isp_ctx_user_dump_active_requests,
                    req, sizeof(uint64_t),
                    "ISP_OUT_FENCE_REQUEST_ACTIVE.%s.%u.%d:",
                    __cam_isp_ife_sfe_resource_handle_id_to_type(
                    req_isp->fence_map_out[i].resource_handle),
                    req_isp->fence_map_out[i].image_buf_addr[0],
                    req_isp->fence_map_out[i].sync_id);
                if (rc) {
                    CAM_ERR(CAM_ISP,
                        "CAM_ISP_CONTEXT DUMP_REQ_INFO: Dump failed, rc: %d",
                        rc);
                    return rc;
                }
            }
        }
    }

    return rc;
}
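
/*
 * Timer dump payload: request id, the APPLY event timestamp (sec and
 * usec) and the current time (sec and usec) - five 64-bit words per
 * request.
 */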
static void *cam_isp_ctx_user_dump_timer(
    void *dump_struct, uint8_t *addr_ptr)
{
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp = NULL;
    uint64_t *addr;
    ktime_t cur_time;

    req = (struct cam_ctx_request *)dump_struct;
    req_isp = (struct cam_isp_ctx_req *)req->req_priv;
    cur_time = ktime_get();

    addr = (uint64_t *)addr_ptr;

    *addr++ = req->request_id;
    *addr++ = ktime_to_timespec64(
        req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_sec;
    *addr++ = ktime_to_timespec64(
        req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_nsec / NSEC_PER_USEC;
    *addr++ = ktime_to_timespec64(cur_time).tv_sec;
    *addr++ = ktime_to_timespec64(cur_time).tv_nsec / NSEC_PER_USEC;

    return addr;
}

static void *cam_isp_ctx_user_dump_stream_info(
    void *dump_struct, uint8_t *addr_ptr)
{
    struct cam_context *ctx = NULL;
    int32_t *addr;

    ctx = (struct cam_context *)dump_struct;
    addr = (int32_t *)addr_ptr;

    *addr++ = ctx->ctx_id;
    *addr++ = ctx->dev_hdl;
    *addr++ = ctx->link_hdl;

    return addr;
}
static int __cam_isp_ctx_dump_in_top_state(
    struct cam_context *ctx,
    struct cam_req_mgr_dump_info *dump_info)
{
    int rc = 0;
    bool dump_only_event_record = false;
    size_t buf_len;
    size_t remain_len;
    ktime_t cur_time;
    uint32_t min_len;
    uint64_t diff;
    uintptr_t cpu_addr;
    uint8_t req_type;
    struct cam_isp_context *ctx_isp;
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp;
    struct cam_ctx_request *req_temp;
    struct cam_hw_dump_args ife_dump_args;
    struct cam_common_hw_dump_args dump_args;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;

    spin_lock_bh(&ctx->lock);

    list_for_each_entry_safe(req, req_temp,
        &ctx->active_req_list, list) {
        if (req->request_id == dump_info->req_id) {
            CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
                dump_info->req_id);
            req_type = 'a';
            goto hw_dump;
        }
    }
    list_for_each_entry_safe(req, req_temp,
        &ctx->wait_req_list, list) {
        if (req->request_id == dump_info->req_id) {
            CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
                dump_info->req_id);
            req_type = 'w';
            goto hw_dump;
        }
    }
    list_for_each_entry_safe(req, req_temp,
        &ctx->pending_req_list, list) {
        if (req->request_id == dump_info->req_id) {
            CAM_INFO(CAM_ISP, "isp dump pending list req: %lld",
                dump_info->req_id);
            req_type = 'p';
            goto hw_dump;
        }
    }
    goto end;

hw_dump:
    rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
        &cpu_addr, &buf_len);
    if (rc) {
        CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
            dump_info->buf_handle, rc);
        goto end;
    }
    if (buf_len <= dump_info->offset) {
        spin_unlock_bh(&ctx->lock);
        CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
            buf_len, dump_info->offset);
        return -ENOSPC;
    }

    remain_len = buf_len - dump_info->offset;
    min_len = sizeof(struct cam_isp_context_dump_header) +
        (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

    if (remain_len < min_len) {
        spin_unlock_bh(&ctx->lock);
        CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
            remain_len, min_len);
        return -ENOSPC;
    }

    ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
    req_isp = (struct cam_isp_ctx_req *) req->req_priv;
    cur_time = ktime_get();
    diff = ktime_us_delta(
        req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY],
        cur_time);
    __cam_isp_ctx_print_event_record(ctx_isp);
    if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
        CAM_INFO(CAM_ISP, "req %lld found no error",
            req->request_id);
        dump_only_event_record = true;
    }

    dump_args.req_id = dump_info->req_id;
    dump_args.cpu_addr = cpu_addr;
    dump_args.buf_len = buf_len;
    dump_args.offset = dump_info->offset;
    dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;

    /* Dump time info */
    rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_timer,
        req, sizeof(uint64_t), "ISP_CTX_DUMP:.%c", req_type);
    if (rc) {
        CAM_ERR(CAM_ISP, "Time dump fail %lld, rc: %d",
            req->request_id, rc);
        goto end;
    }
    dump_info->offset = dump_args.offset;

    /* Dump stream info */
    ctx->ctxt_to_hw_map = ctx_isp->hw_ctx;
    if (ctx->hw_mgr_intf->hw_dump) {
        /* Dump first part of stream info from isp context */
        rc = cam_common_user_dump_helper(&dump_args,
            cam_isp_ctx_user_dump_stream_info, ctx,
            sizeof(int32_t), "ISP_STREAM_INFO_FROM_CTX:");
        if (rc) {
            CAM_ERR(CAM_ISP, "ISP CTX stream info dump fail %lld, rc: %d",
                req->request_id, rc);
            goto end;
        }

        /* Dump second part of stream info from ife hw manager */
        hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
        hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
        isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_DUMP_STREAM_INFO;
        isp_hw_cmd_args.cmd_data = &dump_args;
        hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

        rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
        if (rc) {
            CAM_ERR(CAM_ISP, "IFE HW MGR stream info dump fail %lld, rc: %d",
                req->request_id, rc);
            goto end;
        }
        dump_info->offset = dump_args.offset;
    }

    /* Dump event record */
    rc = __cam_isp_ctx_dump_event_record(ctx_isp, &dump_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "Event record dump fail %lld, rc: %d",
            req->request_id, rc);
        goto end;
    }
    dump_info->offset = dump_args.offset;

    if (dump_only_event_record)
        goto end;

    /* Dump state monitor array */
    rc = __cam_isp_ctx_user_dump_state_monitor_array(ctx_isp, &dump_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "Dump event fail %lld, rc: %d",
            req->request_id, rc);
        goto end;
    }

    /* Dump request info */
    rc = __cam_isp_ctx_dump_req_info(ctx, req, &dump_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "Dump Req info fail %lld, rc: %d",
            req->request_id, rc);
        goto end;
    }
    spin_unlock_bh(&ctx->lock);

    /* Dump CSID, VFE, and SFE info */
    dump_info->offset = dump_args.offset;
    if (ctx->hw_mgr_intf->hw_dump) {
        ife_dump_args.offset = dump_args.offset;
        ife_dump_args.request_id = dump_info->req_id;
        ife_dump_args.buf_handle = dump_info->buf_handle;
        ife_dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
        rc = ctx->hw_mgr_intf->hw_dump(
            ctx->hw_mgr_intf->hw_mgr_priv,
            &ife_dump_args);
        dump_info->offset = ife_dump_args.offset;
    }
    return rc;

end:
    spin_unlock_bh(&ctx->lock);
    return rc;
}
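
/*
 * Flush handling: CANCEL_REQ plucks a single request out of the given
 * list, while FLUSH_TYPE_ALL drains it completely; every flushed
 * request has its remaining out fences signaled with
 * CAM_SYNC_STATE_SIGNALED_CANCEL before being returned to the free
 * list.
 */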
static int __cam_isp_ctx_flush_req_in_flushed_state(
    struct cam_context *ctx,
    struct cam_req_mgr_flush_request *flush_req)
{
    CAM_INFO(CAM_ISP, "Flush (type %d) in flushed state req id %lld ctx_id:%d",
        flush_req->type, flush_req->req_id, ctx->ctx_id);
    if (flush_req->req_id > ctx->last_flush_req)
        ctx->last_flush_req = flush_req->req_id;

    return 0;
}
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
    struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
    int i, rc, tmp = 0;
    uint32_t cancel_req_id_found = 0;
    struct cam_ctx_request *req;
    struct cam_ctx_request *req_temp;
    struct cam_isp_ctx_req *req_isp;
    struct list_head flush_list;
    struct cam_isp_context *ctx_isp = NULL;

    ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

    INIT_LIST_HEAD(&flush_list);
    if (list_empty(req_list)) {
        CAM_DBG(CAM_ISP, "request list is empty");
        if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
            CAM_INFO(CAM_ISP, "no request to cancel (last applied:%lld cancel:%lld)",
                ctx_isp->last_applied_req_id, flush_req->req_id);
            return -EINVAL;
        } else
            return 0;
    }

    CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
        flush_req->type, flush_req->req_id);
    list_for_each_entry_safe(req, req_temp, req_list, list) {
        if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
            if (req->request_id != flush_req->req_id) {
                continue;
            } else {
                list_del_init(&req->list);
                list_add_tail(&req->list, &flush_list);
                cancel_req_id_found = 1;
                __cam_isp_ctx_update_state_monitor_array(
                    ctx_isp,
                    CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
                    req->request_id);
                break;
            }
        }
        list_del_init(&req->list);
        list_add_tail(&req->list, &flush_list);
        __cam_isp_ctx_update_state_monitor_array(ctx_isp,
            CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
    }

    if (list_empty(&flush_list)) {
        /*
         * The request may never have been sent to the KMD, since the
         * UMD can already skip requests in the CSL layer.
         */
        CAM_INFO(CAM_ISP,
            "flush list is empty, flush type %d for req %llu",
            flush_req->type, flush_req->req_id);
        return 0;
    }

    list_for_each_entry_safe(req, req_temp, &flush_list, list) {
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        for (i = 0; i < req_isp->num_fence_map_out; i++) {
            if (req_isp->fence_map_out[i].sync_id != -1) {
                CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
                    req->request_id,
                    req_isp->fence_map_out[i].sync_id);
                rc = cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_FLUSH);
                if (rc) {
                    tmp = req_isp->fence_map_out[i].sync_id;
                    CAM_ERR_RATE_LIMIT(CAM_ISP,
                        "signal fence %d failed", tmp);
                }
                req_isp->fence_map_out[i].sync_id = -1;
            }
        }
        req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
        req_isp->cdm_reset_before_apply = false;
        list_del_init(&req->list);
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    return 0;
}
static int __cam_isp_ctx_flush_req_in_top_state(
    struct cam_context *ctx,
    struct cam_req_mgr_flush_request *flush_req)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp;
    struct cam_isp_stop_args stop_isp;
    struct cam_hw_stop_args stop_args;
    struct cam_hw_reset_args reset_args;
    struct cam_req_mgr_timer_notify timer;

    ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "Flush pending list");
    spin_lock_bh(&ctx->lock);
    __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
    spin_unlock_bh(&ctx->lock);

    if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
        if (ctx->state <= CAM_CTX_READY) {
            ctx->state = CAM_CTX_ACQUIRED;
            goto end;
        }

        spin_lock_bh(&ctx->lock);
        ctx->state = CAM_CTX_FLUSHED;
        ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
        spin_unlock_bh(&ctx->lock);

        CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
            flush_req->req_id, ctx->ctx_id);
        ctx->last_flush_req = flush_req->req_id;

        __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx);

        stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
        stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
        stop_isp.stop_only = true;
        stop_isp.is_internal_stop = false;
        stop_args.args = (void *)&stop_isp;
        rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
            &stop_args);
        if (rc)
            CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
                rc);

        CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
        CAM_DBG(CAM_ISP, "Flush wait and active lists");

        if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
            timer.link_hdl = ctx->link_hdl;
            timer.dev_hdl = ctx->dev_hdl;
            timer.state = false;
            ctx->ctx_crm_intf->notify_timer(&timer);
        }

        spin_lock_bh(&ctx->lock);
        if (!list_empty(&ctx->wait_req_list))
            __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
                flush_req);

        if (!list_empty(&ctx->active_req_list))
            __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
                flush_req);

        ctx_isp->active_req_cnt = 0;
        spin_unlock_bh(&ctx->lock);

        reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
        rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
            &reset_args);
        if (rc)
            CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);

        ctx_isp->init_received = false;
    }

end:
    ctx_isp->bubble_frame_cnt = 0;
    ctx_isp->sof_dbg_irq_en = false;
    atomic_set(&ctx_isp->process_bubble, 0);
    atomic_set(&ctx_isp->rxd_epoch, 0);
    atomic_set(&ctx_isp->internal_recovery_set, 0);
    return rc;
}
static int __cam_isp_ctx_flush_req_in_ready(
    struct cam_context *ctx,
    struct cam_req_mgr_flush_request *flush_req)
{
    int rc = 0;

    CAM_DBG(CAM_ISP, "try to flush pending list");
    spin_lock_bh(&ctx->lock);
    rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

    /* if nothing is in pending req list, change state to acquired */
    if (list_empty(&ctx->pending_req_list))
        ctx->state = CAM_CTX_ACQUIRED;
    spin_unlock_bh(&ctx->lock);

    trace_cam_context_state("ISP", ctx);

    CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
        ctx->state);
    return rc;
}
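
/*
 * CRM-facing ops per substate: apply_req is only wired up for the
 * substates that may legally receive an apply (SOF, EPOCH, BUBBLE);
 * APPLIED, BUBBLE_APPLIED, HW_ERROR and HALT intentionally leave
 * crm_ops empty.
 */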
static struct cam_ctx_ops
    cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_sof,
            .notify_frame_skip =
                __cam_isp_ctx_apply_default_req_settings,
        },
        .irq_ops = NULL,
    },
    /* APPLIED */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* EPOCH */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_epoch,
            .notify_frame_skip =
                __cam_isp_ctx_apply_default_req_settings,
        },
        .irq_ops = NULL,
    },
    /* BUBBLE */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_bubble,
            .notify_frame_skip =
                __cam_isp_ctx_apply_default_req_settings,
        },
        .irq_ops = NULL,
    },
    /* Bubble Applied */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HW ERROR */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HALT */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
};
static struct cam_ctx_ops
    cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
    /* SOF */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_sof,
        },
        .irq_ops = NULL,
    },
    /* APPLIED */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* EPOCH */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_epoch,
        },
        .irq_ops = NULL,
    },
    /* BUBBLE */
    {
        .ioctl_ops = {},
        .crm_ops = {
            .apply_req = __cam_isp_ctx_apply_req_in_bubble,
        },
        .irq_ops = NULL,
    },
    /* Bubble Applied */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HW ERROR */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* HALT */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
};
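
/*
 * RDI-only handlers: with no pixel-path EPOCH available, the SOF event
 * drives the whole state machine here. It reports shutters, detects
 * bubbles (SOF in bubble-applied means the reg update never arrived)
 * and consults the last CDM-done request to decide between waiting
 * for buf done and re-applying a stuck request.
 */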
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
    struct cam_isp_context *ctx_isp, void *evt_data)
{
    int rc = 0;
    struct cam_context *ctx = ctx_isp->base;
    struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
    uint64_t request_id = 0;

    if (!evt_data) {
        CAM_ERR(CAM_ISP, "invalid sof event data");
        return -EINVAL;
    }

    __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

    CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
        ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

    /*
     * Notify reqmgr with the SOF signal. Note that due to scheduling
     * delay we can run into a situation where two active requests are
     * already in the active queue while we try to do the notification.
     * In this case, skip the current notification. This helps the
     * state machine to catch up on the delay.
     */
    if (ctx_isp->active_req_cnt <= 2) {
        __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

        /*
         * It's possible for rup done to be processed before
         * SOF, check for first active request shutter here
         */
        if (!list_empty(&ctx->active_req_list)) {
            struct cam_ctx_request *req = NULL;

            req = list_first_entry(&ctx->active_req_list,
                struct cam_ctx_request, list);
            if (req->request_id > ctx_isp->reported_req_id) {
                request_id = req->request_id;
                ctx_isp->reported_req_id = request_id;
            }
        }
        __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
            CAM_REQ_MGR_SOF_EVENT_SUCCESS);
    } else {
        CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
    }

    if (list_empty(&ctx->active_req_list))
        ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
    else
        CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    return rc;
}
static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
    struct cam_isp_context *ctx_isp, void *evt_data)
{
    struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;

    if (!evt_data) {
        CAM_ERR(CAM_ISP, "invalid sof event data");
        return -EINVAL;
    }

    __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

    CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
        ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    return 0;
}
static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	/*
	 * SOF in bubble applied state means the reg update was not received.
	 * Before incrementing the frame id and overriding the timestamp
	 * value, send the previous SOF timestamp that was captured in the
	 * applied state.
	 */
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no waiting request, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/* change the state to bubble, as reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
end:
	return 0;
}

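/*
 * SOF in the bubble state for an RDI-only context. If a bubble is being
 * processed, consult the last CDM-done request id to decide between
 * waiting for a delayed buf done and moving the request back to the
 * pending list for re-apply; otherwise signal all active requests with
 * error and, unless a bubble is still in flight, drop back to SOF.
 */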
static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t request_id = 0;
	uint64_t last_cdm_done_req = 0;
	int rc = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP, "No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			return -EINVAL;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %d",
				ctx_isp->frame_id,
				ctx_isp->sof_timestamp_val,
				ctx->ctx_id);
			goto end;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu ctx_id: %d",
				last_cdm_done_req, ctx->ctx_id);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(ctx_isp, req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						CAM_SYNC_ISP_EVENT_BUBBLE);
					__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
				goto end;
			} else {
				CAM_WARN(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
			goto end;
		}
	}

	/*
	 * Signal all active requests with error and move all the active
	 * requests to the free list.
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
	}

end:
	/* notify reqmgr with sof signal */
	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	/*
	 * It is an idle frame without any applied request id, send
	 * request id as zero.
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/*
	 * Can't move the substate to SOF if we are processing a bubble,
	 * since the SOF substate can't receive REG_UPD and buf done;
	 * the processing of the bubble req could then never finish.
	 */
	if (!atomic_read(&ctx_isp->process_bubble))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

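/* Reg update received while an RDI-only context is in the bubble state. */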
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Guard against an empty active list before dereferencing */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "RUP in bubble with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %llu",
		req->request_id);

	return 0;
}

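/*
 * Reg update ack for a bubble-applied RDI-only context: promote the
 * waiting request to the active list (or retire it directly if it
 * carries no IO config), notify CRM and report the SOF timestamp.
 */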
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;

	/* notify reqmgr with sof signal */
	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto error;
	}

	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	request_id =
		(req_isp->hw_update_data.packet_opcode_type ==
			CAM_ISP_PACKET_INIT_DEV) ? 0 : req->request_id;

	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_ISP,
			"move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
		/* if packet has buffers, set correct request id */
		request_id = req->request_id;
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active req %lld to free list(cnt=%d)",
			req->request_id, ctx_isp->active_req_cnt);
	}

	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	if (request_id)
		ctx_isp->reported_req_id = request_id;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, req);
	return 0;
error:
	/* Send SOF event as idle frame */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, NULL);

	/*
	 * There is no request in the pending list, move the sub state
	 * machine to the SOF sub state.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}

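/* IRQ dispatch table for RDI-only contexts, indexed by activated substate. */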
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_rdi_only_activated_state_machine_irq
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			__cam_isp_ctx_reg_upd_in_sof,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_applied_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			NULL,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED ie PRE_BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
	},
	/* HALT */
	{
	},
};

static int __cam_isp_ctx_rdi_only_apply_req_top_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Apply failed in Substate[%s], rc %d",
			ctx->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static struct cam_ctx_ops
	cam_isp_ctx_rdi_only_activated_state_machine
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED ie PRE_BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};

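/* Flush handler for offline contexts; rejects flush on online pipelines. */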
static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (!ctx_isp->offline_context) {
		CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
		return -EINVAL;
	}

	flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ? CAM_REQ_MGR_FLUSH_TYPE_ALL :
		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
	flush_req.req_id = cmd->req_id;

	CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%llu)", flush_req.type, flush_req.req_id);

	switch (ctx->state) {
	case CAM_CTX_ACQUIRED:
	case CAM_CTX_ACTIVATED:
		rc = __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
		break;
	case CAM_CTX_READY:
		rc = __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
		break;
	default:
		CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
		return -EINVAL;
	}

	/* Drain the offline apply workq so no scheduled apply survives */
	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
		cam_req_mgr_workq_flush(ctx_isp->workq);

	return rc;
}

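/* Free the per-request hw update and fence map tables owned by the context. */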
static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
{
	int i;

	if (ctx->out_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->out_map_entries[i]);
			ctx->out_map_entries[i] = NULL;
		}
		kfree(ctx->out_map_entries);
		ctx->out_map_entries = NULL;
	}

	if (ctx->in_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->in_map_entries[i]);
			ctx->in_map_entries[i] = NULL;
		}
		kfree(ctx->in_map_entries);
		ctx->in_map_entries = NULL;
	}

	if (ctx->hw_update_entry) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->hw_update_entry[i]);
			ctx->hw_update_entry[i] = NULL;
		}
		kfree(ctx->hw_update_entry);
		ctx->hw_update_entry = NULL;
	}

	ctx->max_out_map_entries = 0;
	ctx->max_in_map_entries = 0;
	ctx->max_hw_update_entries = 0;
}

static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;
	int i;

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	} else {
		CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
	}

	ctx->last_flush_req = 0;
	ctx_isp->custom_enabled = false;
	ctx_isp->use_frame_header_ts = false;
	ctx_isp->use_default_apply = false;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->support_consumed_addr = false;
	ctx_isp->aeb_enabled = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	kfree(ctx_isp->vfe_bus_comp_grp);
	kfree(ctx_isp->sfe_bus_comp_grp);
	ctx_isp->vfe_bus_comp_grp = NULL;
	ctx_isp->sfe_bus_comp_grp = NULL;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add a sanity check here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_WARN(CAM_ISP, "Active list is not empty");

	/* Flush the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);
	__cam_isp_ctx_free_mem_hw_entries(ctx);
	cam_req_mgr_workq_destroy(&ctx_isp->workq);
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}

/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "releasing hw");
		__cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
	}

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	cam_common_release_evt_params(ctx->dev_hdl);
	memset(&ctx_isp->evt_inject_params, 0, sizeof(struct cam_hw_inject_evt_param));

	ctx->session_hdl = -1;
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->offline_context = false;
	ctx_isp->vfps_aux_context = false;
	ctx_isp->rdi_only_context = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->v4l2_event_sub_ids = 0;
	ctx_isp->resume_hw_in_flushed = false;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add a sanity check here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_ISP, "Active list is not empty");

	/* Flush the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);
	__cam_isp_ctx_free_mem_hw_entries(ctx);

	ctx->state = CAM_CTX_AVAILABLE;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}

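/*
 * Top-level config path: pull a free request object, parse and validate
 * the packet against the last flush, let the hw manager prepare update
 * entries, take fence references and queue the request (INIT packets go
 * through the init queue, update packets through CRM or the local queue).
 */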
static int __cam_isp_ctx_config_dev_in_top_state(
	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet;
	size_t remain_len = 0;
	struct cam_hw_prepare_update_args cfg = {0};
	struct cam_req_mgr_add_request add_req;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint32_t packet_opcode = 0;

	CAM_DBG(CAM_ISP, "get free request object......");

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_ISP, "No more request obj free");
		return -ENOMEM;
	}

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet);
	if (IS_ERR(packet)) {
		rc = PTR_ERR(packet);
		goto free_req;
	}

	/* Query the packet opcode */
	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
	isp_hw_cmd_args.cmd_data = (void *)packet;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_req;
	}

	packet_opcode = isp_hw_cmd_args.u.packet_op_code;
	if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_INFO(CAM_ISP,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EBADR;
		goto free_req;
	} else if ((packet_opcode == CAM_ISP_PACKET_INIT_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)
		&& ctx->last_flush_req && packet->header.request_id) {
		CAM_WARN(CAM_ISP,
			"last flushed req is %lld, config dev(init) for req %lld",
			ctx->last_flush_req, packet->header.request_id);
		rc = -EBADR;
		goto free_req;
	}

	cfg.packet = packet;
	cfg.remain_len = remain_len;
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.max_hw_update_entries = ctx->max_hw_update_entries;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.max_out_map_entries = ctx->max_out_map_entries;
	cfg.max_in_map_entries = ctx->max_in_map_entries;
	cfg.out_map_entries = req_isp->fence_map_out;
	cfg.in_map_entries = req_isp->fence_map_in;
	cfg.priv = &req_isp->hw_update_data;
	cfg.pf_data = &(req->pf_data);
	cfg.num_out_map_entries = 0;
	cfg.num_in_map_entries = 0;
	memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_isp->num_cfg = cfg.num_hw_update_entries;
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;
	req_isp->hw_update_data.packet = packet;
	req->pf_data.packet_handle = cmd->packet_handle;
	req->pf_data.packet_offset = cmd->offset;
	req->pf_data.req = req;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
				req_isp->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_ISP,
		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d",
		packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
		req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	if (req_isp->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
			ctx_isp->init_received = true;

			if ((ctx_isp->vfps_aux_context) && (req->request_id > 0))
				ctx_isp->resume_hw_in_flushed = true;
			else
				ctx_isp->resume_hw_in_flushed = false;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
				ctx->state);
		}
	} else {
		if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
				req->request_id, ctx->state);
			goto put_ref;
		}

		if ((ctx_isp->offline_context) || (ctx_isp->vfps_aux_context)) {
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, true);
		} else if (ctx->ctx_crm_intf->add_req) {
			memset(&add_req, 0, sizeof(add_req));
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				if (rc == -EBADR)
					CAM_INFO(CAM_ISP,
						"Add req failed: req id=%llu, it has been flushed",
						req->request_id);
				else
					CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
						req->request_id);
			} else {
				__cam_isp_ctx_enqueue_request_in_order(
					ctx, req, true);
			}
		} else {
			CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
			rc = -ENODEV;
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_REQ,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch))
		__cam_isp_ctx_schedule_apply_req(ctx_isp);
	else if (ctx_isp->vfps_aux_context &&
		(req_isp->hw_update_data.packet_opcode_type != CAM_ISP_PACKET_INIT_DEV))
		__cam_isp_ctx_schedule_apply_req(ctx_isp);

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
				req_isp->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}

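/*
 * Size and allocate the per-request hw update entry and in/out fence map
 * tables based on the acquired resource count, then wire them into every
 * request on the free list.
 */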
static int __cam_isp_ctx_allocate_mem_hw_entries(
	struct cam_context *ctx,
	struct cam_hw_acquire_args *param)
{
	int rc = 0, i;
	uint32_t max_res = 0;
	uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
	struct cam_ctx_request *req;
	struct cam_ctx_request *temp_req;
	struct cam_isp_ctx_req *req_isp;

	if (!param->op_params.param_list[0]) {
		max_res = CAM_ISP_CTX_RES_MAX;
	} else {
		max_res = param->op_params.param_list[0];
		if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
			max_res += param->op_params.param_list[1];
			max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
		}
	}

	ctx->max_in_map_entries = max_res;
	ctx->max_out_map_entries = max_res;
	ctx->max_hw_update_entries = max_hw_upd_entries;

	CAM_DBG(CAM_ISP,
		"Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
		max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN));

	ctx->hw_update_entry = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_update_entry *),
		GFP_KERNEL);
	if (!ctx->hw_update_entry) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory",
			ctx->dev_name, ctx->ctx_id);
		return -ENOMEM;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
			sizeof(struct cam_hw_update_entry), GFP_KERNEL);
		if (!ctx->hw_update_entry[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->in_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->in_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->in_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->out_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->out_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->out_map_entries[i] = kcalloc(ctx->max_out_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->out_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	list_for_each_entry_safe(req, temp_req,
		&ctx->free_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		req_isp->cfg = ctx->hw_update_entry[req->index];
		req_isp->fence_map_in = ctx->in_map_entries[req->index];
		req_isp->fence_map_out = ctx->out_map_entries[req->index];
	}

	return rc;

end:
	__cam_isp_ctx_free_mem_hw_entries(ctx);
	return rc;
}

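/*
 * Legacy acquire: copies the resource list from user space, reserves hw
 * through the hw manager, picks the substate machine matching the context
 * type and creates the device handle for CRM.
 */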
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_acquire_args param;
	struct cam_isp_resource *isp_res = NULL;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed();

	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
		ctx_isp->split_acquire = true;
		CAM_DBG(CAM_ISP, "Acquire dev handle");
		goto get_dev_handle;
	}

	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP, "Too many resources in the acquire");
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	isp_res = kzalloc(
		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
	if (!isp_res) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy %d resources from user",
		cmd->num_resources);

	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
		sizeof(*isp_res)*cmd->num_resources)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = cmd->num_resources;
	param.acquire_info = (uintptr_t) isp_res;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	/* Query whether the context has rdi only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * this context has rdi only resource, assign the rdi only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx_isp->split_acquire = false;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	kfree(isp_res);
	isp_res = NULL;

get_dev_handle:
	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags. so false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;
	req_hdl_param.dev_id = CAM_ISP;

	CAM_DBG(CAM_ISP, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_ISP, "Cannot create device handle");
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
		cmd->session_handle, cmd->num_resources, ctx->ctx_id);

	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx_isp->hw_acquired)
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(isp_res);
end:
	return rc;
}

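/* Acquire hw (v1 API): variable-size acquire payload, no returned topology. */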
static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
	void *args)
{
	int rc = 0;
	int i;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uintptr_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
		&param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);

	/* Query whether the context has rdi only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * this context has rdi only resource, assign the rdi only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = param.ctxt_to_hw_map;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}

static void cam_req_mgr_process_workq_apply_req_worker(struct work_struct *w)
{
	cam_req_mgr_process_workq(w);
}

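/*
 * Acquire hw (v2 API): in addition to v1, reports the acquired hw id/path
 * back to user space, queries bus comp group records and creates the
 * apply workq for offline/vfps contexts.
 */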
static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
	void *args)
{
	int rc = 0, i, j;
	struct cam_acquire_hw_cmd_v2 *cmd =
		(struct cam_acquire_hw_cmd_v2 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;
	struct cam_isp_comp_record_query query_cmd;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uintptr_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_hw;
	}

	/*
	 * Set feature flags if applicable;
	 * custom hw is supported only on v2
	 */
	ctx_isp->custom_enabled =
		(param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
	ctx_isp->use_frame_header_ts =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
	ctx_isp->use_default_apply =
		(param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
	ctx_isp->aeb_enabled =
		(param.op_flags & CAM_IFE_CTX_AEB_EN);

	/* Query the context bus comp group information */
	ctx_isp->vfe_bus_comp_grp = kcalloc(CAM_IFE_BUS_COMP_NUM_MAX,
		sizeof(struct cam_isp_context_comp_record), GFP_KERNEL);
	if (!ctx_isp->vfe_bus_comp_grp) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for vfe_bus_comp_grp",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto free_hw;
	}

	if (param.op_flags & CAM_IFE_CTX_SFE_EN) {
		ctx_isp->sfe_bus_comp_grp = kcalloc(CAM_SFE_BUS_COMP_NUM_MAX,
			sizeof(struct cam_isp_context_comp_record), GFP_KERNEL);
		if (!ctx_isp->sfe_bus_comp_grp) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for sfe_bus_comp_grp",
				ctx->dev_name, ctx->ctx_id);
			rc = -ENOMEM;
			goto free_hw;
		}
	}

	query_cmd.vfe_bus_comp_grp = ctx_isp->vfe_bus_comp_grp;
	if (ctx_isp->sfe_bus_comp_grp)
		query_cmd.sfe_bus_comp_grp = ctx_isp->sfe_bus_comp_grp;

	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_BUS_COMP_GROUP;
	isp_hw_cmd_args.cmd_data = &query_cmd;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	/* Query whether the context has rdi only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (param.valid_acquired_hw) {
		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			cmd->hw_info.acquired_hw_id[i] =
				param.acquired_hw_id[i];

		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
				cmd->hw_info.acquired_hw_path[i][j] =
					param.acquired_hw_path[i][j];
	}
	cmd->hw_info.valid_acquired_hw = param.valid_acquired_hw;

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * this context has rdi only resource, assign the rdi only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
		ctx_isp->offline_context = true;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	if (ctx_isp->offline_context || ctx_isp->vfps_aux_context) {
		rc = cam_req_mgr_workq_create("ife_apply_req", 20,
			&ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
			cam_req_mgr_process_workq_apply_req_worker);
		if (rc)
			CAM_ERR(CAM_ISP,
				"Failed to create workq for IFE rc:%d offline: %s vfps: %s",
				rc, CAM_BOOL_TO_YESNO(ctx_isp->offline_context),
				CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context));
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = param.ctxt_to_hw_map;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}

static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
	void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_ISP, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
	else if (api_version == 2)
		rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
	else
		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

	return rc;
}

static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		return -EINVAL;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

	if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}

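/*
 * Config in flushed state: after a successful top-state config with an
 * init packet already received, resume the hw and restart the device so
 * streaming continues without a full teardown.
 */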
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_start_stop_dev_cmd start_cmd;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		rc = -EINVAL;
		goto end;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (rc)
		goto end;

	if (!ctx_isp->init_received) {
		CAM_WARN(CAM_ISP,
			"Received update packet in flushed state, skip start");
		goto end;
	}

	CAM_DBG(CAM_ISP, "vfps_ctx:%s resume_hw_in_flushed:%d ctx:%d",
		CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context),
		ctx_isp->resume_hw_in_flushed,
		ctx->ctx_id);

	if (ctx_isp->vfps_aux_context) {
		/* Resume the HW only when we get the first valid req */
		if (!ctx_isp->resume_hw_in_flushed)
			goto end;
		else
			ctx_isp->resume_hw_in_flushed = false;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
		goto end;
	}

	start_cmd.dev_handle = cmd->dev_handle;
	start_cmd.session_handle = cmd->session_handle;
	rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to re-start HW after flush rc: %d", rc);
	else
		CAM_INFO(CAM_ISP,
			"Received init after flush. Re-start HW complete in ctx:%d",
			ctx->ctx_id);

end:
	CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
		ctx_isp->substate_activated);
	return rc;
}

static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!link) {
		CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u",
			link, ctx->ctx_id);
		return -EINVAL;
	}

	if (!link->crm_cb) {
		CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u",
			link->crm_cb, ctx->ctx_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Enter.........");

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_isp->subscribe_event =
		CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
	ctx_isp->trigger_id = link->trigger_id;

	/* change state only if we had the init config */
	if (ctx_isp->init_received) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}

static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->trigger_id = -1;

	return rc;
}

static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	int rc = 0;

	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
	dev_info->trigger_on = true;

	return rc;
}

static inline void __cam_isp_context_reset_ctx_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	ctx_isp->frame_id = 0;
	ctx_isp->sof_timestamp_val = 0;
	ctx_isp->boot_timestamp = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->recovery_req_id = 0;
	ctx_isp->aeb_error_cnt = 0;
	ctx_isp->sof_dbg_irq_en = false;
	ctx_isp->last_sof_jiffies = 0;
	ctx_isp->last_applied_jiffies = 0;
}

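/*
 * Start the device from the ready state: program the first (INIT)
 * request, choose the initial substate, move the request to the proper
 * queue before hw start (SOF/RUP may fire immediately), and roll back
 * on failure.
 */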
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_isp_start_args start_isp;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_ISP, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.is_internal_start = false;

	ctx_isp->last_applied_req_id = req->request_id;

	if (ctx->state == CAM_CTX_FLUSHED)
		start_isp.start_only = true;
	else
		start_isp.start_only = false;

	__cam_isp_context_reset_ctx_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED :
		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
		CAM_ISP_CTX_ACTIVATED_SOF;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * In case of CSID TPG we might receive SOF and RUP IRQs
	 * before hw_mgr_intf->hw_start has returned. So move
	 * req out of pending list before hw_start and add it
	 * back to pending list if hw_start fails.
	 */
	list_del_init(&req->list);

	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->free_req_list);
		atomic_set(&ctx_isp->rxd_epoch, 1);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to free list(cnt: %d) offline ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to wait list(cnt: %d) ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx_isp->offline_context);
	}

	/*
	 * This is the only place to change the state before calling the hw:
	 * the hardware tasklet has higher priority, so irq handling may
	 * begin before hw_start returns.
	 */
	ctx->state = CAM_CTX_ACTIVATED;
	trace_cam_context_state("ISP", ctx);
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		/* HW failure. The user needs to clean up the resource */
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		if ((rc == -ETIMEDOUT) &&
			(isp_ctx_debug.enable_cdm_cmd_buff_dump))
			rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);
		trace_cam_context_state("ISP", ctx);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		goto end;
	}
	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
	return rc;
}


static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}
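
/*
 * Stop streaming: halt the substate machine, stop the HW, notify CRM, then
 * cancel-signal every fence on the pending, wait and active request lists
 * and reset the per-stream counters.
 */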
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_hw_stop_args stop;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;

	/* Mask off all the incoming hardware events */
	spin_lock_bh(&ctx->lock);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	spin_unlock_bh(&ctx->lock);

	/* stop hw first */
	if (ctx_isp->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = false;
		stop_isp.is_internal_stop = false;
		stop.args = (void *) &stop_isp;
		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_stop) {
		struct cam_req_mgr_notify_stop notify;

		notify.link_hdl = ctx->link_hdl;
		CAM_DBG(CAM_ISP,
			"Notify CRM about device stop ctx %u link 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		ctx->ctx_crm_intf->notify_stop(&notify);
	} else if (!ctx_isp->offline_context)
		CAM_ERR(CAM_ISP, "cb not present");

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->sof_dbg_irq_en = false;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
	}

	return rc;
}

static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
	ctx_isp->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}

static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);

	return rc;
}
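
/*
 * Pause/resume are forwarded to the HW manager as internal HW commands;
 * they are driven by the CRM link-event handler below.
 */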
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
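
/*
 * Internal (KMD-driven) recovery: halt and stop the HW, then, unless
 * skip_resume is set, resume it and re-start with the first pending request
 * re-applied (including IQ config). Bails out early if requests are still
 * active, since recovery is retried after their buf_done.
 */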
static int __cam_isp_ctx_reset_and_recover(
	bool skip_resume, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_isp_start_args start_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	spin_lock_bh(&ctx->lock);
	if (ctx_isp->active_req_cnt) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP,
			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
			ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		goto end;
	}

	if (ctx->state != CAM_CTX_ACTIVATED) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
			ctx->state, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		rc = -EINVAL;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* Cannot start with no request */
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	if (!ctx_isp->hw_ctx) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Invalid hw context pointer ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	/* Block all events till HW is resumed */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	spin_unlock_bh(&ctx->lock);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	CAM_INFO(CAM_ISP,
		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);

	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
	stop_isp.stop_only = true;
	stop_isp.is_internal_stop = true;
	stop_args.args = (void *)&stop_isp;
	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
		&stop_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
			rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	/* The API also allows streaming off without resuming, for fatal errors */
	if (skip_resume) {
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		CAM_INFO(CAM_ISP,
			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
			ctx->ctx_id, ctx_isp->last_applied_req_id,
			ctx_isp->recovery_req_id, ctx->link_hdl);
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.start_only = true;
	start_isp.is_internal_start = true;

	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;

	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	/* IQ applied for this request, on next trigger skip IQ cfg */
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;

	/* Notify userland that KMD has done internal recovery */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY,
		0, req->request_id, ctx);

	CAM_INFO(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x for req: %llu",
		ctx->ctx_id, ctx->link_hdl, req->request_id);

end:
	return rc;
}
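
/*
 * Attempt reset-and-recover for a stalled bubble. Returns true only if
 * recovery was actually performed; on false the caller falls back to a
 * register dump.
 */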
static bool __cam_isp_ctx_try_internal_recovery_for_bubble(
	int64_t error_req_id, struct cam_context *ctx)
{
	int rc;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	if (isp_ctx_debug.disable_internal_recovery_mask &
		CAM_ISP_CTX_DISABLE_RECOVERY_BUBBLE)
		return false;

	/* Perform recovery if bubble recovery is stalled */
	if (!atomic_read(&ctx_isp->process_bubble))
		return false;

	/* Validate if errored request has been applied */
	if (ctx_isp->last_applied_req_id < error_req_id) {
		CAM_WARN(CAM_ISP,
			"Skip trying for internal recovery last applied: %lld error_req: %lld for ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, error_req_id,
			ctx->ctx_id, ctx->link_hdl);
		return false;
	}

	if (__cam_isp_ctx_validate_for_req_reapply_util(ctx_isp)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery not possible for ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		return false;
	}

	/* Trigger reset and recover */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	rc = __cam_isp_ctx_reset_and_recover(false, ctx);
	if (rc) {
		CAM_WARN(CAM_ISP,
			"Internal recovery failed in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		goto error;
	}

	CAM_DBG(CAM_ISP,
		"Internal recovery done in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
		ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);

	return true;

error:
	return false;
}
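
/*
 * Dispatch CRM link events (pause, resume, SOF freeze, stall, property
 * updates) to the handlers above. In the ACQUIRED state only property
 * updates are accepted.
 */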
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if ((ctx->state == CAM_CTX_ACQUIRED) &&
		(link_evt_data->evt_type != CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES)) {
		CAM_WARN(CAM_ISP,
			"Got unexpected evt:%d in acquired state",
			link_evt_data->evt_type);
		return -EINVAL;
	}

	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
	case CAM_REQ_MGR_LINK_EVT_EOF:
		/* No handling */
		break;
	case CAM_REQ_MGR_LINK_EVT_PAUSE:
		rc = __cam_isp_ctx_link_pause(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME:
		rc = __cam_isp_ctx_link_resume(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
		rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_STALLED: {
		bool internal_recovery_skipped = false;

		if (ctx->state == CAM_CTX_ACTIVATED) {
			if (link_evt_data->try_for_recovery)
				internal_recovery_skipped =
					__cam_isp_ctx_try_internal_recovery_for_bubble(
						link_evt_data->req_id, ctx);

			if (!internal_recovery_skipped)
				rc = __cam_isp_ctx_trigger_reg_dump(
					CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
		}
		link_evt_data->try_for_recovery = internal_recovery_skipped;
	}
		break;
	case CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES:
		if (link_evt_data->u.properties_mask &
			CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF)
			ctx_isp->vfps_aux_context = true;
		else
			ctx_isp->vfps_aux_context = false;
		CAM_DBG(CAM_ISP, "vfps_aux_context:%s on ctx: %u",
			CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), ctx->ctx_id);
		break;
	default:
		CAM_WARN(CAM_ISP,
			"Unsupported event type: 0x%x on ctx: %u",
			link_evt_data->evt_type, ctx->ctx_id);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_ISP,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);

	return rc;
}
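
/*
 * CRM apply entry point: delegate to the apply_req op of the currently
 * activated substate; warn (rate-limited) when the substate has no handler.
 */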
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl);
	CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), apply->request_id);
	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.apply_req) {
		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply failed in active Substate[%s] rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
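
/*
 * Apply "default settings" on a trigger that carries no request. Only SOF
 * triggers are honored here; a pending internal recovery is serviced at
 * this point instead, since no request is in flight.
 */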
static int __cam_isp_ctx_apply_default_settings(
	struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
		CAM_WARN(CAM_ISP,
			"Trigger: %u not subscribed for: %u",
			apply->trigger_point, ctx_isp->subscribe_event);
		return 0;
	}

	/* Allow apply default settings for IFE only at SOF */
	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
		return 0;

	if (atomic_read(&ctx_isp->internal_recovery_set))
		return __cam_isp_ctx_reset_and_recover(false, ctx);

	if (ctx_isp->use_default_apply) {
		CAM_DBG(CAM_ISP,
			"Enter: apply req in Substate:%d request_id:%lld ctx:%u on link:0x%x",
			ctx_isp->substate_activated, apply->request_id,
			ctx->ctx_id, ctx->link_hdl);

		ctx_ops = &ctx_isp->substate_machine[
			ctx_isp->substate_activated];
		if (ctx_ops->crm_ops.notify_frame_skip) {
			rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
		} else {
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"No handle function in activated substate %d",
				ctx_isp->substate_activated);
			rc = -EFAULT;
		}

		if (rc)
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"Apply default failed in active substate %d rc %d",
				ctx_isp->substate_activated, rc);
	}

	return rc;
}
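
/*
 * IRQ entry point in the ACTIVATED state: under the ctx lock, look up and
 * run the handler for this event id in the current substate's IRQ table.
 */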
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
		__cam_isp_ctx_get_event_ts(evt_id, evt_data));

	CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d, ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), evt_id,
		ctx->ctx_id);
	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
	if (irq_ops->irq_ops[evt_id]) {
		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CAM_DBG(CAM_ISP,
			"No handle function for Substate[%s], evt id %d, ctx:%d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), evt_id,
			ctx->ctx_id);
		if (isp_ctx_debug.enable_state_monitor_dump)
			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
	}

	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), ctx->ctx_id);

	spin_unlock(&ctx->lock);
	return rc;
}
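
/*
 * Validate an injected event notification before it is latched: only the
 * listed error types are accepted, and PF injection is allowed only when
 * the SMMU context bank has non-fatal faults enabled.
 */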
static int cam_isp_context_validate_event_notify_injection(struct cam_context *ctx,
	struct cam_hw_inject_evt_param *evt_params)
{
	int rc = 0;
	uint32_t evt_type;
	uint64_t req_id;

	req_id = evt_params->req_id;
	evt_type = evt_params->u.evt_notify.evt_notify_type;

	switch (evt_type) {
	case V4L_EVENT_CAM_REQ_MGR_ERROR: {
		struct cam_hw_inject_err_evt_param *err_evt_params =
			&evt_params->u.evt_notify.u.err_evt_params;

		switch (err_evt_params->err_type) {
		case CAM_REQ_MGR_ERROR_TYPE_RECOVERY:
		case CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE:
		case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY:
		case CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY:
			break;
		default:
			CAM_ERR(CAM_ISP,
				"Invalid error type: %u for error event injection err code: %u req id: %llu ctx id: %u dev hdl: %d",
				err_evt_params->err_type, err_evt_params->err_code,
				req_id, ctx->ctx_id, ctx->dev_hdl);
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject ERR evt: err code: %u err type: %u req id: %llu ctx id: %u dev hdl: %d",
			err_evt_params->err_code, err_evt_params->err_type,
			req_id, ctx->ctx_id, ctx->dev_hdl);
		break;
	}
	case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: {
		struct cam_hw_inject_pf_evt_param *pf_evt_params =
			&evt_params->u.evt_notify.u.pf_evt_params;
		bool non_fatal_en;

		rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Fail to query whether device's cb has non-fatal enabled rc:%d",
				rc);
			return rc;
		}

		if (!non_fatal_en) {
			CAM_ERR(CAM_ISP,
				"Fail to inject page fault event notification. Page fault is fatal for ISP");
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject PF evt: req_id: %llu ctx id: %u dev hdl: %d ctx found: %hhu",
			req_id, ctx->ctx_id, ctx->dev_hdl, pf_evt_params->ctx_found);
		break;
	}
	default:
		CAM_ERR(CAM_ISP, "Event notification type not supported: %u", evt_type);
		rc = -EINVAL;
	}

	return rc;
}
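
/*
 * Latch a validated injected event into the context for later delivery;
 * evt_inject_params.is_valid marks it as pending.
 */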
static int cam_isp_context_inject_evt(void *context, void *evt_args)
{
	struct cam_context *ctx = context;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_inject_evt_param *evt_params = evt_args;
	int rc = 0;

	if (!ctx || !evt_args) {
		CAM_ERR(CAM_ISP,
			"Invalid params ctx %s event args %s",
			CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args));
		return -EINVAL;
	}

	ctx_isp = ctx->ctx_priv;

	if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE) {
		rc = cam_isp_context_validate_event_notify_injection(ctx, evt_params);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Event notification injection failed validation rc: %d", rc);
			return rc;
		}
	} else {
		CAM_ERR(CAM_ISP, "Buffer done err injection %u not supported by ISP",
			evt_params->inject_id);
		return -EINVAL;
	}

	memcpy(&ctx_isp->evt_inject_params, evt_params,
		sizeof(struct cam_hw_inject_evt_param));
	ctx_isp->evt_inject_params.is_valid = true;

	return rc;
}

/* top state machine */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_flushed_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_activated,
			.apply_req = __cam_isp_ctx_apply_req,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_settings,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.process_evt = __cam_isp_ctx_process_evt,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.recovery_ops = cam_isp_context_hw_recovery,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
};

static int cam_isp_context_hw_recovery(void *priv, void *data)
{
	struct cam_context *ctx = priv;
	int rc = -EPERM;

	if (ctx->hw_mgr_intf->hw_recovery)
		rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data);
	else
		CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery");

	return rc;
}
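
/*
 * Helper for the page-fault handler below: dump PF info for each request in
 * @req_list and stop once the request owning the faulted buffer is found.
 */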
static void cam_isp_context_find_faulted_context(struct cam_context *ctx,
	struct list_head *req_list, struct cam_hw_dump_pf_args *pf_args, bool *found)
{
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	int rc;

	*found = false;
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		CAM_INFO(CAM_ISP, "List req_id: %llu ctx id: %u",
			req->request_id, ctx->ctx_id);
		rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");
		/*
		 * Even if the faulted ctx has already been found, keep
		 * searching until the faulted buffer itself is located.
		 */
		if (pf_args->pf_context_info.mem_type != CAM_FAULT_BUF_NOT_FOUND) {
			*found = true;
			break;
		}
	}
}
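
/*
 * Page-fault handler: walk the active, wait and pending request lists
 * looking for the faulted buffer, log the faulted resource, and notify UMD
 * when required.
 */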
static int cam_isp_context_dump_requests(void *data, void *args)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args;
	int rc = 0;
	bool found;

	if (!ctx || !pf_args) {
		CAM_ERR(CAM_ISP, "Invalid ctx %pK or pf args %pK",
			ctx, pf_args);
		return -EINVAL;
	}

	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid isp ctx");
		return -EINVAL;
	}

	if (pf_args->handle_sec_pf)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over active list for isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->active_req_list,
		pf_args, &found);
	if (found)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over waiting list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->wait_req_list,
		pf_args, &found);
	if (found)
		goto end;

	/*
	 * In certain scenarios we observe both overflow and SMMU pagefault
	 * for a particular request. If overflow is handled before page fault
	 * we need to traverse through pending request list because if
	 * bubble recovery is enabled on any request we move that request
	 * and all the subsequent requests to the pending list while handling
	 * overflow error.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->pending_req_list,
		pf_args, &found);
	if (found)
		goto end;

end:
	if (pf_args->pf_context_info.resource_type) {
		ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
		CAM_INFO(CAM_ISP,
			"Page fault on resource:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
			__cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type,
			pf_args->pf_context_info.resource_type),
			pf_args->pf_context_info.resource_type, ctx->ctx_id, ctx_isp->frame_id,
			ctx_isp->reported_req_id, ctx_isp->last_applied_req_id);
	}

	/*
	 * Notify UMD of the PF if it was found on this ctx, or if sending the
	 * PF event is forced even when no faulted context was found.
	 */
	if (pf_args->pf_context_info.ctx_found ||
		pf_args->pf_context_info.force_send_pf_evt)
		rc = cam_context_send_pf_evt(ctx, pf_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify PF event to userspace rc: %d", rc);

	return rc;
}

static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("isp_ctx", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		return rc;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
	debugfs_create_u32("disable_internal_recovery_mask", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery_mask);

	return 0;
}
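
/*
 * One-time per-context init: zero the ISP context, wire req_base[] to
 * req_isp[], initialize the base camera context, install the top-level
 * state machine, and register debugfs entries on first use.
 */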
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -1;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();

err:
	return rc;
}

int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}