cam_isp_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
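/*
 * Atomically advance a ring-buffer head counter and store the wrapped
 * write index in ret, so concurrent writers claim distinct slots.
 */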
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
	max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	struct cam_smmu_pf_info *pf_info);

static int cam_isp_context_hw_recovery(void *priv, void *data);

static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);

static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
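/*
 * Record an ISP context event (submit/apply/epoch/rup/bufdone) in the
 * per-event ring buffer along with the request id and capture time.
 * SUBMIT and APPLY must carry a request; frame events may not.
 */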
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}

	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}
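/*
 * Serialize every event ring buffer into a caller-provided dump buffer:
 * for each event type, emit a tagged header followed by
 * (req_id, sec, usec) triplets ordered from oldest to newest entry.
 */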
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset)
{
	int i, j;
	int index;
	size_t remain_len;
	uint8_t *dst;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t *addr, *start;
	uint64_t state_head;
	struct timespec64 ts;
	struct cam_isp_context_dump_header *hdr;
	struct cam_isp_context_event_record *record;

	if (!cpu_addr || !buf_len || !offset || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %zu %pK %pK",
			cpu_addr, buf_len, offset, ctx_isp);
		return -EINVAL;
	}

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}
		index = oldest_entry;

		if (buf_len <= *offset) {
			CAM_WARN(CAM_ISP,
				"Dump buffer overshoot len %zu offset %zu",
				buf_len, *offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
			sizeof(uint64_t));
		remain_len = buf_len - *offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}

		dst = (uint8_t *)cpu_addr + *offset;
		hdr = (struct cam_isp_context_dump_header *)dst;
		scnprintf(hdr->tag,
			CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN, "ISP_EVT_%s:",
			__cam_isp_evt_val_to_type(i));
		hdr->word_size = sizeof(uint64_t);
		addr = (uint64_t *)(dst +
			sizeof(struct cam_isp_context_dump_header));
		start = addr;
		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];
			ts = ktime_to_timespec64(record->timestamp);
			*addr++ = record->req_id;
			*addr++ = ts.tv_sec;
			*addr++ = ts.tv_nsec / NSEC_PER_USEC;
			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
		hdr->size = hdr->word_size * (addr - start);
		*offset += hdr->size +
			sizeof(struct cam_isp_context_dump_header);
	}
	return 0;
}
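/*
 * Copy a compact snapshot of one request (ack counts, bubble state,
 * fence maps and IO configs) into the mini-dump region, bounds-checking
 * against end_addr before every copy and reporting the bytes consumed.
 */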
static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
	uint8_t *start_addr, uint8_t *end_addr,
	unsigned long *bytes_updated)
{
	struct cam_isp_ctx_req_mini_dump *req_md;
	struct cam_buf_io_cfg *io_cfg;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet = NULL;
	unsigned long bytes_required = 0;

	bytes_required = sizeof(*req_md);
	*bytes_updated = 0;
	if (start_addr + bytes_required > end_addr)
		return;

	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_md->num_acked = req_isp->num_acked;
	req_md->num_deferred_acks = req_isp->num_deferred_acks;
	req_md->bubble_report = req_isp->bubble_report;
	req_md->bubble_detected = req_isp->bubble_detected;
	req_md->reapply_type = req_isp->reapply_type;
	req_md->request_id = req->request_id;
	*bytes_updated += bytes_required;

	if (req_isp->num_fence_map_out) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_out;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_out = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
		req_md->num_fence_map_out = req_isp->num_fence_map_out;
		*bytes_updated += bytes_required;
	}

	if (req_isp->num_fence_map_in) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_in;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_in = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
		req_md->num_fence_map_in = req_isp->num_fence_map_in;
		*bytes_updated += bytes_required;
	}

	packet = req_isp->hw_update_data.packet;
	if (packet && packet->num_io_configs) {
		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
			packet->io_configs_offset / 4);
		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->io_cfg, io_cfg, bytes_required);
		*bytes_updated += bytes_required;
		req_md->num_io_cfg = packet->num_io_configs;
	}
}
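/*
 * Mini-dump callback registered with the HW manager: snapshot the ISP
 * context state (timestamps, ids, flags, event records), then append
 * per-request mini dumps for the active, wait and pending lists until
 * the destination buffer runs out.
 */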
static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
{
	struct cam_isp_ctx_mini_dump_info *md;
	struct cam_isp_context *ctx_isp;
	struct cam_context *ctx;
	struct cam_ctx_request *req, *req_temp;
	struct cam_hw_mini_dump_args *dump_args;
	uint8_t *start_addr;
	uint8_t *end_addr;
	unsigned long total_bytes = 0;
	unsigned long bytes_updated = 0;
	uint32_t i;

	if (!priv || !args) {
		CAM_ERR(CAM_ISP, "invalid params");
		return 0;
	}

	dump_args = (struct cam_hw_mini_dump_args *)args;
	if (dump_args->len < sizeof(*md)) {
		CAM_ERR(CAM_ISP,
  242. "In sufficient size received %lu required size: %zu",
			dump_args->len, sizeof(*md));
		return 0;
	}

	ctx = (struct cam_context *)priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	start_addr = (uint8_t *)dump_args->start_addr;
	end_addr = start_addr + dump_args->len;
	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;

	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
	md->boot_timestamp = ctx_isp->boot_timestamp;
	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
	md->init_timestamp = ctx_isp->init_timestamp;
	md->frame_id = ctx_isp->frame_id;
	md->reported_req_id = ctx_isp->reported_req_id;
	md->last_applied_req_id = ctx_isp->last_applied_req_id;
	md->last_bufdone_err_apply_req_id =
		ctx_isp->last_bufdone_err_apply_req_id;
	md->frame_id_meta = ctx_isp->frame_id_meta;
	md->substate_activated = ctx_isp->substate_activated;
	md->ctx_id = ctx->ctx_id;
	md->subscribe_event = ctx_isp->subscribe_event;
	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
	md->isp_device_type = ctx_isp->isp_device_type;
	md->active_req_cnt = ctx_isp->active_req_cnt;
	md->trigger_id = ctx_isp->trigger_id;
	md->rdi_only_context = ctx_isp->rdi_only_context;
	md->offline_context = ctx_isp->offline_context;
	md->hw_acquired = ctx_isp->hw_acquired;
	md->init_received = ctx_isp->init_received;
	md->split_acquire = ctx_isp->split_acquire;
	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
	md->support_consumed_addr = ctx_isp->support_consumed_addr;
	md->use_default_apply = ctx_isp->use_default_apply;
	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		memcpy(md->event_record[i], ctx_isp->event_record[i],
			sizeof(struct cam_isp_context_event_record) *
			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
	}

	total_bytes += sizeof(*md);
	if (start_addr + total_bytes >= end_addr)
		goto end;

	if (!list_empty(&ctx->active_req_list)) {
		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->active_list[md->active_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->wait_req_list)) {
		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->wait_list[md->wait_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->pending_req_list)) {
		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->pending_list[md->pending_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

end:
	dump_args->bytes_written = total_bytes;
	return 0;
}
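/*
 * Record one state transition (substate, frame id, request id and the
 * triggering event) in the circular state-monitor array, stamped with
 * the time in ms since context init.
 */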
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}
static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
		return "SEC_EVT_SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
		return "SEC_EVT_EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
		return "OUT_OF_SYNC_FRAME_DROP";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
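/*
 * Dump the circular state-monitor array, oldest entry first, at error
 * log level to reconstruct the state history of preceding requests.
 */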
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}
static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}

static const char *__cam_isp_ctx_crm_trigger_point_to_string(
	int trigger_point)
{
	switch (trigger_point) {
	case CAM_TRIGGER_POINT_SOF:
		return "SOF";
	case CAM_TRIGGER_POINT_EOF:
		return "EOF";
	default:
		return "Invalid";
	}
}
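/*
 * Notify CRM that a trigger point (SOF/EOF) occurred on this context;
 * silently succeeds if the trigger type is not in the subscribed mask.
 */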
static int __cam_isp_ctx_notify_trigger_util(
	int trigger_type, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_trigger_notify notify;

	/* Trigger type not supported, return */
	if (!(ctx_isp->subscribe_event & trigger_type)) {
		CAM_DBG(CAM_ISP,
			"%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id);
		return 0;
	}

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.frame_id = ctx_isp->frame_id;
	notify.trigger = trigger_type;
	notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
	notify.trigger_id = ctx_isp->trigger_id;

	CAM_DBG(CAM_ISP,
		"Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld",
		__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
		ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
		ctx_isp->req_info.last_bufdone_req_id);

	rc = ctx->ctx_crm_intf->notify_trigger(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id, rc);

	return rc;
}
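/*
 * Send a V4L2 error event (type, code, offending request id) to
 * userspace through cam_req_mgr_notify_message.
 */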
static int __cam_isp_ctx_notify_v4l2_error_event(
	uint32_t error_type, uint32_t error_code,
	uint64_t error_request_id, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx->session_hdl;
	req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
	req_msg.u.err_msg.error_type = error_type;
	req_msg.u.err_msg.link_hdl = ctx->link_hdl;
	req_msg.u.err_msg.request_id = error_request_id;
	req_msg.u.err_msg.resource_size = 0x0;
	req_msg.u.err_msg.error_code = error_code;

	CAM_DBG(CAM_ISP,
		"v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x",
		error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);

	rc = cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_ERROR,
		V4L_EVENT_CAM_REQ_MGR_EVENT);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x",
			error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
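/*
 * Notify CRM of a device error for the given request; bubble errors
 * are logged as warnings, all other errors as fatal.
 */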
static int __cam_isp_ctx_notify_error_util(
	uint32_t trigger_type, enum cam_req_mgr_device_error error,
	uint64_t req_id, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_error_notify notify;

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.req_id = req_id;
	notify.error = error;
	notify.trigger = trigger_type;
	notify.frame_id = ctx_isp->frame_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;

	if (error == CRM_KMD_ERR_BUBBLE)
		CAM_WARN(CAM_ISP,
			"Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x",
			req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
	else
		CAM_ERR(CAM_ISP,
			"Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x",
			error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);

	rc = ctx->ctx_crm_intf->notify_err(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify error: %u for req: %llu on ctx: %u in link: 0x%x",
			error, req_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
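/*
 * Issue the given reg-dump command to the HW manager for this
 * context's acquired hardware.
 */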
static int __cam_isp_ctx_trigger_reg_dump(
	enum cam_hw_mgr_command cmd,
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = cmd;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d",
			ctx->ctx_id, ctx->link_hdl, rc);
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"Reg dump type: %u successful in ctx: %u on link: 0x%x",
		cmd, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
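/* Ask CRM to pause the SOF watchdog timer on this context's link. */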
static int __cam_isp_ctx_pause_crm_timer(
	struct cam_context *ctx)
{
	int rc = -EINVAL;
	struct cam_req_mgr_timer_notify timer;

	timer.link_hdl = ctx->link_hdl;
	timer.dev_hdl = ctx->dev_hdl;
	timer.state = false;
	rc = ctx->ctx_crm_intf->notify_timer(&timer);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success",
		ctx->ctx_id, ctx->link_hdl);
end:
	return rc;
}
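/*
 * Dump all command buffers of a request: validate each buffer's
 * offset/length, then either copy the contents into the provided dump
 * buffer (dump_to_buff) via the CDM helper or print them to the log.
 */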
static int cam_isp_ctx_dump_req(
	struct cam_isp_ctx_req *req_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset,
	bool dump_to_buff)
{
	int i, rc = 0;
	size_t len = 0;
	uint32_t *buf_addr;
	uint32_t *buf_start, *buf_end;
	size_t remain_len = 0;
	struct cam_cdm_cmd_buf_dump_info dump_info;

	for (i = 0; i < req_isp->num_cfg; i++) {
		rc = cam_packet_util_get_cmd_mem_addr(
			req_isp->cfg[i].handle, &buf_addr, &len);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Failed to get_cmd_mem_addr, rc=%d",
				rc);
		} else {
			if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
				CAM_ERR(CAM_ISP,
					"Invalid offset exp %u actual %u",
					req_isp->cfg[i].offset, (uint32_t)len);
				return -EINVAL;
			}
			remain_len = len - req_isp->cfg[i].offset;

			if (req_isp->cfg[i].len >
				((uint32_t)remain_len)) {
				CAM_ERR(CAM_ISP,
					"Invalid len exp %u remain_len %u",
					req_isp->cfg[i].len,
					(uint32_t)remain_len);
				return -EINVAL;
			}

			buf_start = (uint32_t *)((uint8_t *) buf_addr +
				req_isp->cfg[i].offset);
			buf_end = (uint32_t *)((uint8_t *) buf_start +
				req_isp->cfg[i].len - 1);

			if (dump_to_buff) {
				if (!cpu_addr || !offset || !buf_len) {
					CAM_ERR(CAM_ISP, "Invalid args");
					break;
				}
				dump_info.src_start = buf_start;
				dump_info.src_end = buf_end;
				dump_info.dst_start = cpu_addr;
				dump_info.dst_offset = *offset;
				dump_info.dst_max_size = buf_len;
				rc = cam_cdm_util_dump_cmd_bufs_v2(
					&dump_info);
				*offset = dump_info.dst_offset;
				if (rc)
					return rc;
			} else
				cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
		}
	}
	return rc;
}
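/*
 * Add a request to the pending list in ascending request-id order:
 * larger-id entries are moved to a temporary list, the new request is
 * appended, and the moved entries are re-attached behind it. A SUBMIT
 * event is recorded for the request.
 */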
static int __cam_isp_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;
	struct cam_isp_context *ctx_isp;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_ISP,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_SUBMIT, req);
	spin_unlock_bh(&ctx->lock);
	return 0;
}
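/*
 * Queue an INIT request: if an INIT packet is already pending, merge
 * the new request's fence maps, HW update entries, reg-dump buffers
 * and ePCR params into it and recycle the new request to the free
 * list; an update packet arriving before any INIT is rejected.
 */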
static int __cam_isp_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_isp_ctx_req *req_isp_old;
	struct cam_isp_ctx_req *req_isp_new;
	struct cam_isp_prepare_hw_update_data *req_update_old;
	struct cam_isp_prepare_hw_update_data *req_update_new;
	struct cam_isp_prepare_hw_update_data *hw_update_data;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		CAM_DBG(CAM_ISP, "INIT packet added req id= %lld",
			req->request_id);
		goto end;
	}
	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp_old->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
			ctx->max_hw_update_entries) {
			CAM_WARN(CAM_ISP,
				"Can not merge INIT pkt num_cfgs = %d",
				(req_isp_old->num_cfg +
					req_isp_new->num_cfg));
			rc = -ENOMEM;
		}

		if (req_isp_old->num_fence_map_out != 0 ||
			req_isp_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_isp_old->fence_map_out,
				req_isp_new->fence_map_out,
				sizeof(req_isp_new->fence_map_out[0]) *
				req_isp_new->num_fence_map_out);
			req_isp_old->num_fence_map_out =
				req_isp_new->num_fence_map_out;

			memcpy(req_isp_old->fence_map_in,
				req_isp_new->fence_map_in,
				sizeof(req_isp_new->fence_map_in[0]) *
				req_isp_new->num_fence_map_in);
			req_isp_old->num_fence_map_in =
				req_isp_new->num_fence_map_in;

			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
				req_isp_new->cfg,
				sizeof(req_isp_new->cfg[0]) *
				req_isp_new->num_cfg);
			req_isp_old->num_cfg += req_isp_new->num_cfg;

			memcpy(&req_old->pf_data, &req->pf_data,
				sizeof(struct cam_hw_mgr_dump_pf_data));

			if (req_isp_new->hw_update_data.num_reg_dump_buf) {
				req_update_new = &req_isp_new->hw_update_data;
				req_update_old = &req_isp_old->hw_update_data;
				memcpy(&req_update_old->reg_dump_buf_desc,
					&req_update_new->reg_dump_buf_desc,
					sizeof(struct cam_cmd_buf_desc) *
					req_update_new->num_reg_dump_buf);
				req_update_old->num_reg_dump_buf =
					req_update_new->num_reg_dump_buf;
			}

			/* Update HW update params for ePCR */
			hw_update_data = &req_isp_new->hw_update_data;
			req_isp_old->hw_update_data.frame_header_res_id =
				req_isp_new->hw_update_data.frame_header_res_id;
			req_isp_old->hw_update_data.frame_header_cpu_addr =
				hw_update_data->frame_header_cpu_addr;
			req_isp_old->hw_update_data.mup_en = req_isp_new->hw_update_data.mup_en;
			req_isp_old->hw_update_data.mup_val = req_isp_new->hw_update_data.mup_val;
			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_ISP,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
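/* Map an IFE/SFE output resource handle to a readable name for logs. */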
static const char *__cam_isp_ife_sfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* IFE output ports */
	case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL";
	case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4";
	case CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16";
	case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP";
	case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD";
	case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF";
	case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0";
	case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1";
	case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2";
	case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF";
	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS";
	case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS";
	case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST";
	case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP";
	case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP";
	case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP";
	case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD";
	case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR";
	case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW";
	case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD";
	case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE";
	case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS";
	case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST";
	case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG";
	case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW";
	case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD";
	case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF";
	case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS";
	case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA";
	/* SFE output ports */
	case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0";
	case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1";
	case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2";
	case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3";
	case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4";
	case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0";
	case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1";
	case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2";
	case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0";
	case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1";
	case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2";
	case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR";
	case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW";
	case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}
static const char *__cam_isp_tfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* TFE output ports */
	case CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL";
	case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP";
	case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF";
	case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0";
	case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1";
	case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST";
	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF";
	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS";
	case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4";
	case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16";
	case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}

static const char *__cam_isp_resource_handle_id_to_type(
	uint32_t device_type, uint32_t resource_handle)
{
	switch (device_type) {
	case CAM_IFE_DEVICE_TYPE:
		return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle);
	case CAM_TFE_DEVICE_TYPE:
		return __cam_isp_tfe_resource_handle_id_to_type(resource_handle);
	default:
		return "INVALID_DEV_TYPE";
	}
}

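/*
 * Pull the hardware timestamp out of an event payload. The payload layout
 * depends on the event id, so each case casts evt_data to the matching
 * event-data struct; DONE and secondary events carry no timestamp here.
 */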
static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
{
	uint64_t ts = 0;

	if (!evt_data)
		return 0;

	switch (evt_id) {
	case CAM_ISP_HW_EVENT_ERROR:
		ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_SOF:
		ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_REG_UPDATE:
		ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EPOCH:
		ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EOF:
		ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_DONE:
	case CAM_ISP_HW_SECONDARY_EVENT:
		break;
	default:
		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
	}

	return ts;
}

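/*
 * Notify the request manager of the boot-clock SOF timestamp for this
 * frame via the V4L SOF_BOOT_TS event.
 */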
static void __cam_isp_ctx_send_sof_boot_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->boot_timestamp, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
		V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the boot time for req id:%lld",
			request_id);
}

static void __cam_isp_ctx_send_unified_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg_v2.request_id = request_id;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] =
		(request_id == 0) ? 0 : ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] =
		ctx_isp->boot_timestamp;
	req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d boot time stamp:0x%llx",
		ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, ctx_isp->base->ctx_id,
		ctx_isp->boot_timestamp);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof and boot time for req id:%lld",
			request_id);
}

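/*
 * Recover the SOF timestamp from the frame header written by the hardware:
 * two 32-bit words are stitched into a single qtimer tick count and scaled
 * by the CAM_IFE_QTIMER_MUL/DIV factors, then reported to the request
 * manager in place of the IRQ-time SOF timestamp.
 */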
static void __cam_isp_ctx_send_sof_timestamp_frame_header(
	struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
	uint64_t request_id, uint32_t sof_event_status)
{
	uint32_t *time32 = NULL;
	uint64_t timestamp = 0;
	struct cam_req_mgr_message req_msg;

	time32 = frame_header_cpu_addr;
	timestamp = (uint64_t) time32[1];
	timestamp = timestamp << 24;
	timestamp |= (uint64_t)(time32[0] >> 8);
	timestamp = mul_u64_u32_div(timestamp,
		CAM_IFE_QTIMER_MUL_FACTOR,
		CAM_IFE_QTIMER_DIV_FACTOR);

	ctx_isp->sof_timestamp_val = timestamp;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);
}

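/*
 * Top-level SOF notification: prefer the unified (frame_msg_v2) message
 * when userspace has subscribed to it, otherwise fall back to the legacy
 * SOF message, and always follow up with the boot timestamp. When
 * frame-header timestamping is in use, the qtimer message is sent from
 * the buf done path via __cam_isp_ctx_send_sof_timestamp_frame_header.
 */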
static void __cam_isp_ctx_send_sof_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS))
		&& !ctx_isp->use_frame_header_ts) {
		__cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id);
		return;
	}

	if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
		goto end;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);

end:
	__cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
		request_id, sof_event_status);
}

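/*
 * Dump, rate limited, every output resource of the previous request that
 * never generated a buf done (sync_id still valid), to help debug buf
 * done congestion.
 */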
static void __cam_isp_ctx_handle_buf_done_fail_log(
	uint64_t request_id, struct cam_isp_ctx_req *req_isp,
	uint32_t isp_device_type)
{
	int i;
	const char *handle_type;

	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP,
			"Num Resources exceed MAX %d >= %d",
			req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
		return;
	}

	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
		request_id, req_isp->num_fence_map_out, req_isp->num_acked,
		req_isp->bubble_report, req_isp->bubble_detected);
	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Resource Handles that fail to generate buf_done in prev frame");

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		if (req_isp->fence_map_out[i].sync_id != -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				isp_device_type,
				req_isp->fence_map_out[i].resource_handle);

			trace_cam_log_event("Buf_done Congestion",
				handle_type, request_id,
				req_isp->fence_map_out[i].sync_id);
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
				handle_type,
				req_isp->fence_map_out[i].resource_handle,
				req_isp->fence_map_out[i].sync_id);
		}
	}
}

static void __cam_isp_context_reset_internal_recovery_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->process_bubble, 0);
	ctx_isp->recovery_req_id = 0;
}

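/*
 * Attempt bubble recovery for the request stored in recovery_req_id by
 * reporting a bubble error to CRM. The request is looked up first in the
 * wait list, then in the pending list; if reporting fails, or the request
 * is in neither list, the recovery parameters are simply reset.
 */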
static int __cam_isp_context_try_internal_recovery(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	/*
	 * Start with the wait list; if recovery is still set, the errored
	 * request has not been moved to the pending list yet and its buf
	 * done has not occurred - recover from here.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;

		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/*
	 * If not in the wait list, the only other possibility is the pending
	 * list: on error detection bubble detect is set and, assuming a new
	 * frame comes in after detection, a reg update moves the request to
	 * the active list where it finishes with its buf dones.
	 */
	if (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;

		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/* If the request is not found in either of the lists, skip recovery */
	__cam_isp_context_reset_internal_recovery_params(ctx_isp);
end:
	return rc;
}

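/*
 * Called once all fences of a request are acked: either recycle the
 * request to the free list (normal or flushed case) or queue it back to
 * the pending list for bubble re-apply, then kick internal recovery if
 * it is armed and the active list has drained.
 */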
static int __cam_isp_ctx_handle_buf_done_for_req_list(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0, i;
	uint64_t buf_done_req_id;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	ctx_isp->active_req_cnt--;
	buf_done_req_id = req->request_id;

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
		req_isp->cdm_reset_before_apply = false;
		ctx_isp->bubble_frame_cnt = 0;

		if (buf_done_req_id <= ctx->last_flush_req) {
			for (i = 0; i < req_isp->num_fence_map_out; i++)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);

			list_add_tail(&req->list, &ctx->free_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
				buf_done_req_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
			ctx_isp->last_bufdone_err_apply_req_id = 0;
		} else {
			list_add(&req->list, &ctx->pending_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
				req->request_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
		}
	} else {
		if (!ctx_isp->use_frame_header_ts) {
			if (ctx_isp->reported_req_id < buf_done_req_id) {
				ctx_isp->reported_req_id = buf_done_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					buf_done_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}

		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		/*
		 * Only update process_bubble and bubble_frame_cnt when the
		 * bubble was detected on this req, in case another request
		 * is processing a bubble.
		 */
		if (req_isp->bubble_detected) {
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			req_isp->bubble_detected = false;
		}

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
		ctx_isp->last_bufdone_err_apply_req_id = 0;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
		__cam_isp_context_try_internal_recovery(ctx_isp);

	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_BUFDONE, req);
	return rc;
}

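/*
 * Match each buf done resource handle against the request's fence map and
 * signal the corresponding fences. Handles that don't belong to this
 * request are collected in done_next_req so the caller can retry them
 * against the next request in the active list.
 */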
static int __cam_isp_ctx_handle_buf_done_for_request(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	struct cam_isp_hw_done_event_data *done_next_req)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	done_next_req->num_handles = 0;
	done_next_req->timestamp = done->timestamp;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in the current request, it could
			 * belong to the next request; this can happen if
			 * an IRQ delay occurs. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_WARN(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					done->resource_handle[i]),
				req->request_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type =
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%lld ctx_id:%d bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on.
			 * Increment the ack number here, and queue the
			 * request back to the pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}

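/*
 * Flush the deferred acks recorded for a request: in the bubble case only
 * the ack count is updated (the request will be re-applied), otherwise
 * the deferred fences are signalled now with the given status.
 */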
static int __cam_isp_handle_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	bool bubble_handling,
	uint32_t status, uint32_t event_cause)
{
	int i, j;
	int rc = 0;
	struct cam_isp_ctx_req *req_isp =
		(struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, bubble_handling);

	for (i = 0; i < req_isp->num_deferred_acks; i++) {
		j = req_isp->deferred_fence_map_index[i];

		CAM_DBG(CAM_ISP,
			"ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
			ctx->ctx_id, status, event_cause,
			req->request_id,
			req_isp->fence_map_out[j].resource_handle,
			req_isp->fence_map_out[j].sync_id);

		if (req_isp->fence_map_out[j].sync_id == -1) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
				ctx->ctx_id, req->request_id, j,
				req_isp->fence_map_out[j].resource_handle);
			continue;
		}

		if (!bubble_handling) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
				ctx->ctx_id, req->request_id, status,
				req_isp->fence_map_out[j].resource_handle);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				status, event_cause);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
					ctx->ctx_id, req->request_id,
					req_isp->fence_map_out[j].sync_id,
					status, rc);
			} else {
				req_isp->num_acked++;
				req_isp->fence_map_out[j].sync_id = -1;
			}
		} else {
			req_isp->num_acked++;
		}
	}

	req_isp->num_deferred_acks = 0;

	return rc;
}

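/*
 * Variant of the buf done handler for targets that report the last
 * consumed address: optionally match the consumed address against the
 * fence map entry before signalling, and optionally defer the signal for
 * requests still in the wait list.
 */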
static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	bool verify_consumed_addr,
	bool defer_buf_done)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;
	uint32_t cmp_addr = 0;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ?
				CAM_36BIT_INTF_GET_IOVA_BASE(
					req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if (verify_consumed_addr &&
				(done->last_consumed_addr[i] != cmp_addr))
				continue;

			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in the current request, it could
			 * belong to the next request; this can happen if
			 * an IRQ delay occurs. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_DBG(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type, done->resource_handle[i]),
				req->request_id);
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				ctx_isp->isp_device_type,
				req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%lld ctx_id:%d bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (defer_buf_done) {
			uint32_t deferred_indx = req_isp->num_deferred_acks;

			/*
			 * If we are handling this BUF_DONE event for a request
			 * that is still in wait_list, do not signal now,
			 * instead mark it as done and handle it later -
			 * if this request is going into BUBBLE state later
			 * it will automatically be re-applied. If this is not
			 * going into BUBBLE, signal fences later.
			 * Note - we will come here only if the last consumed
			 * address matches with this port's buffer.
			 */
			req_isp->deferred_fence_map_index[deferred_indx] = j;
			req_isp->num_deferred_acks++;
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
				ctx->ctx_id, req->request_id, bubble_state,
				req_isp->bubble_report);
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
				ctx->ctx_id, req_isp->num_deferred_acks, j,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);
			continue;
		} else if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					false,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on.
			 * Increment the ack number here, and queue the
			 * request back to the pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					true,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc)
			req_isp->num_acked++;

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}

static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_done_event_data done_next_req;

	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
		bubble_state, &done_next_req);

	if (done_next_req.num_handles) {
		struct cam_isp_hw_done_event_data unhandled_res;
		struct cam_ctx_request *next_req = list_last_entry(
			&ctx->active_req_list, struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id) {
			/*
			 * Few resource handles are already signalled in the
			 * current request; let's check if there is another
			 * request waiting for these resources. This can
			 * happen if handling some of next request's buf done
			 * events are happening first before handling current
			 * request's remaining buf dones due to IRQ scheduling.
			 * Let's check only one more request as we will have
			 * a maximum of 2 requests in active_list at any time.
			 */
			CAM_WARN(CAM_ISP,
				"Unhandled buf done resources for req %lld, trying next request %lld in active_list",
				req->request_id, next_req->request_id);

			__cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
				next_req, &done_next_req,
				bubble_state, &unhandled_res);

			if (unhandled_res.num_handles == 0)
				CAM_INFO(CAM_ISP,
					"BUF Done event handled for next request %lld",
					next_req->request_id);
			else
				CAM_ERR(CAM_ISP,
					"BUF Done not handled for next request %lld",
					next_req->request_id);
		} else {
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
		}
	}

	return rc;
}

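/*
 * Check whether any handle in this buf done event matches the given
 * (next) request by resource handle and last consumed address; a match
 * means the event arrived for the newer request first, i.e. an IRQ delay
 * on the current request.
 */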
static void __cam_isp_ctx_buf_done_match_req(
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int i, j;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;
	uint32_t cmp_addr = 0;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ?
				CAM_36BIT_INTF_GET_IOVA_BASE(
					req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if ((done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle) &&
				(done->last_consumed_addr[i] == cmp_addr)) {
				match_count++;
				break;
			}
		}
	}

	*irq_delay_detected = (match_count > 0);

	CAM_DBG(CAM_ISP,
		"buf done num handles %d match count %d for next req:%lld",
		done->num_handles, match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}

static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	bool req_in_wait_list = false;

	if (list_empty(&ctx->active_req_list)) {
		if (!list_empty(&ctx->wait_req_list)) {
			struct cam_isp_ctx_req *req_isp;

			req = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
			req_in_wait_list = true;

			if (ctx_isp->last_applied_req_id !=
				ctx_isp->last_bufdone_err_apply_req_id) {
				CAM_WARN(CAM_ISP,
					"Buf done with no active request but with req in wait list, req %llu last apply id:%lld last err id:%lld",
					req->request_id,
					ctx_isp->last_applied_req_id,
					ctx_isp->last_bufdone_err_apply_req_id);
				ctx_isp->last_bufdone_err_apply_req_id =
					ctx_isp->last_applied_req_id;
			}

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * Verify consumed address for this request to make
			 * sure we are handling the buf_done for the correct
			 * buffer. Also defer actual buf_done handling, i.e.
			 * do not signal the fence as this request may go into
			 * Bubble state eventually.
			 */
			rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		}

		if (!req_in_wait_list && (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id)) {
			CAM_WARN(CAM_ISP,
				"Buf done with no active request bubble_state=%d last_applied_req_id:%lld",
				bubble_state, ctx_isp->last_applied_req_id);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If irq delay isn't detected, then we need to verify
	 * the consumed address for current req, otherwise, we
	 * can't verify the consumed address.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
	 * since the reported buf done event may belong to current
	 * req, then we can't signal this event for next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}

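/*
 * Buf done entry point for the activated state: route to the
 * consumed-address-verifying handler when the target supports it.
 */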
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}

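/*
 * Worker callback that applies the next pending request in offline mode.
 * Scheduled from the offline epoch handler; bails out unless an epoch has
 * been received and fewer than two requests are active.
 */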
static int __cam_isp_ctx_apply_req_offline(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if ((ctx->state != CAM_CTX_ACTIVATED) ||
		(!atomic_read(&ctx_isp->rxd_epoch)) ||
		(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
		goto end;

	if (ctx_isp->active_req_cnt >= 2)
		goto end;

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	memset(&cfg, 0, sizeof(cfg));
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than
	 * CDM processing returns, so set the substate before applying
	 * the setting.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}

static int __cam_isp_ctx_schedule_apply_req_offline(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_req_offline;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}

static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			break;
		}
	}

	__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}

static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->substate_activated),
			ctx_isp->frame_id);
	return 0;
}

static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* No io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);
end:
	return rc;
}

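/*
 * Notify CRM of the SOF trigger and send timestamps. While a bubble is
 * being processed, use the last CDM-done request id to distinguish a
 * merely delayed buf done from a stuck CDM, re-queueing the bubbled
 * request for re-apply in the stuck case.
 */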
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %lld",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the SOF signal. Note, due to scheduling delay
	 * we can run into a situation where two active requests may already
	 * be in the active queue while we try to do the notification. In
	 * that case, we need to skip the current notification. This helps
	 * the state machine to catch up the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		if (request_id != 0)
			ctx_isp->reported_req_id = request_id;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;

	/* Notify reqmgr with EOF signal */
	rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}

static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	/* First check if there is a valid request in active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing is in the active list, the current request might not
	 * have moved from the wait to the active list yet. This can happen
	 * if REG_UPDATE to sw is coming immediately after SOF.
	 */
	if (request_id == 0 && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);
	return rc;
}

static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}

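/*
 * Epoch received in the applied substate, i.e. no reg update arrived for
 * the applied request in time: mark it as a bubble, report the error to
 * CRM (or skip recovery if reporting is disabled), and move it to the
 * active list so the buf done path can finish it.
 */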
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no wait req at epoch, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list; the buf-done
	 * handler takes care of the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Update the event record before the req pointer is advanced
	 * to another (possibly invalid) request.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Fetch the request again from the active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (request_id == 0) {
		/* Guarded: the active list can be empty on the error path */
		if (!list_empty(&ctx->active_req_list)) {
			req = list_last_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			__cam_isp_ctx_update_state_monitor_array(ctx_isp,
				CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
				req->request_id);
		}
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}
static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	/* Guarded: only peek at the last request when the list is non-empty */
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}
static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
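
/*
 * Same bubble handling as __cam_isp_ctx_epoch_in_applied, but entered
 * from BUBBLE_APPLIED: the reg-upd ack was missed again, so the context
 * transitions back to BUBBLE after reporting.
 */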
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * This means we missed the reg upd ack, so we need to
	 * transition to BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req at epoch, this is an error
		 * case. Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list; the buf-done
	 * handler takes care of the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	/* Fixed: the format string expects three args; ctx->ctx_id was missing */
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	/* Guarded: the active list can be empty on the error path */
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}
static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
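
/* Map an ISP HW error type to the sync event code reported on fences */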
static uint32_t get_evt_param(uint32_t error_type)
{
	switch (error_type) {
	case CAM_ISP_HW_ERROR_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_OVERFLOW;
	case CAM_ISP_HW_ERROR_P2I_ERROR:
		return CAM_SYNC_ISP_EVENT_P2I_ERROR;
	case CAM_ISP_HW_ERROR_VIOLATION:
		return CAM_SYNC_ISP_EVENT_VIOLATION;
	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
	default:
		return CAM_SYNC_ISP_EVENT_UNKNOWN;
	}
}
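
/*
 * Central HW error handler: optionally dumps registers and request data,
 * signals an error on the fences of requests that do not have bubble
 * recovery enabled, re-queues recoverable requests to the pending list,
 * and finally notifies CRM (fatal vs bubble) and userland (v4l2 error
 * event) before moving the context to the HW_ERROR substate.
 */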
static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	enum cam_req_mgr_device_error error;
	uint32_t i = 0;
	bool found = false;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_to_report = NULL;
	struct cam_ctx_request *req_to_dump = NULL;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_ctx_req *req_isp_to_report = NULL;
	uint64_t error_request_id;
	struct cam_hw_fence_map_entry *fence_map_out = NULL;
	uint32_t evt_param;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;
	uint32_t error_type = error_event_data->error_type;

	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);

	__cam_isp_ctx_pause_crm_timer(ctx);

	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) ||
		(error_type == CAM_ISP_HW_ERROR_VIOLATION))
		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	evt_param = get_evt_param(error_type);

	/*
	 * The error is likely caused by the first request on the active
	 * list. If the active list is empty, check the wait list (the
	 * error may have hit as soon as RUP arrived and we are handling
	 * it before the RUP was processed).
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"handling error with no active request");
		if (list_empty(&ctx->wait_req_list)) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Error with no active/wait request");
			goto end;
		} else {
			req_to_dump = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
		}
	} else {
		req_to_dump = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	}

	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;

	if (error_event_data->enable_req_dump)
		rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						evt_param);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			ctx_isp->active_req_cnt--;
		} else {
			found = true;
			break;
		}
	}

	if (found)
		goto move_to_pending;

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						evt_param);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			found = true;
			break;
		}
	}

move_to_pending:
	/*
	 * If bubble recovery is enabled on any request we need to move that
	 * request and all the subsequent requests to the pending list.
	 * Note:
	 * We need to traverse the active list in reverse order and add
	 * to the head of the pending list.
	 * e.g. pending current state: 10, 11 | active current state: 8, 9
	 * intermittent for-loop iteration - pending: 9, 10, 11 | active: 8
	 * final state - pending: 8, 9, 10, 11 | active: NULL
	 */
	if (found) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->active_req_cnt--;
		}
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->wait_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
		}
	}

end:
	do {
		if (list_empty(&ctx->pending_req_list)) {
			error_request_id = ctx_isp->last_applied_req_id;
			req_isp = NULL;
			break;
		}
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		error_request_id = ctx_isp->last_applied_req_id;

		if (req_isp->bubble_report) {
			req_to_report = req;
			req_isp_to_report = req_to_report->req_priv;
			break;
		}

		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					evt_param);
			req_isp->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	error = CRM_KMD_ERR_FATAL;
	if (req_isp_to_report && req_isp_to_report->bubble_report &&
		error_event_data->recovery_enabled)
		error = CRM_KMD_ERR_BUBBLE;

	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error,
		error_request_id, ctx_isp);

	/*
	 * Report the error that occurred in the KMD to UMD as well, so it
	 * can take the necessary action and dump the relevant info.
	 */
	if (error == CRM_KMD_ERR_FATAL) {
		uint32_t req_mgr_error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;

		if (error_type == CAM_ISP_HW_ERROR_CSID_FATAL)
			req_mgr_error_type =
				CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;

		__cam_isp_ctx_notify_v4l2_error_event(req_mgr_error_type,
			error_event_data->error_code, error_request_id, ctx);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
	CAM_DBG(CAM_ISP, "Handling error done on ctx: %u", ctx->ctx_id);
exit:
	return rc;
}
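
/*
 * FS2-specific variants of the handlers above, wired into
 * cam_isp_ctx_fs2_state_machine_irq below (the exact FS2 use case is
 * not spelled out in this file). The SOF handler also notifies CRM and
 * sends the SOF timestamp when no request is waiting on a reg-update.
 */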
static int __cam_isp_ctx_fs2_sof_in_sof_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (!(list_empty(&ctx->wait_req_list)))
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);
end:
	return rc;
}
static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;
	struct cam_context *ctx = ctx_isp->base;
	int prev_active_req_cnt = 0;
	int curr_req_id = 0;
	struct cam_ctx_request *req;

	prev_active_req_cnt = ctx_isp->active_req_cnt;
	/* Guarded: only peek at the head request when the list is non-empty */
	if (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		curr_req_id = req->request_id;
	}

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);

	if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
		if (list_empty(&ctx->wait_req_list) &&
			list_empty(&ctx->active_req_list)) {
			CAM_DBG(CAM_ISP, "No request, move to SOF");
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_SOF;
			if (ctx_isp->reported_req_id < curr_req_id) {
				ctx_isp->reported_req_id = curr_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					curr_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
	}
	return rc;
}
static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	return __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
}

static int __cam_isp_ctx_fs2_buf_done_in_applied(
	struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	return __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
}
static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}
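
/*
 * FS2 reg-update in APPLIED: move the waiting request to the active
 * list (or free it immediately when it has no IO config) and advance to
 * EPOCH; for requests with exactly one output fence, also notify CRM
 * and send the SOF timestamp.
 */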
static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* No io config, so the request is already complete. */
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/*
	 * This function is only called directly from the applied and
	 * bubble-applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	if (req_isp->num_fence_map_out != 1)
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated));
end:
	if (req != NULL && !rc) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
	return rc;
}
static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
	struct cam_isp_context *ctx_isp)
{
	struct cam_context *ctx = ctx_isp->base;

	CAM_ERR(CAM_ISP,
		"AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height needs to be re-configured for ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	/* Pause CRM timer */
	__cam_isp_ctx_pause_crm_timer(ctx);

	/* Trigger reg dump */
	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	/* Notify CRM on fatal error */
	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
		ctx_isp->last_applied_req_id, ctx_isp);

	/* Notify userland on error */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
		CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);

	/* Change state to HALT, stop further processing of HW events */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
}
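
/*
 * Kick in internal (bubble) recovery: pick the request to recover from
 * the wait list (or from the pending list for an out-of-sync frame
 * drop), mark the bubble/recovery flags, and either wait for the
 * outstanding buf-dones or notify CRM right away and re-queue the
 * request as pending.
 */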
static int __cam_isp_ctx_trigger_internal_recovery(
	bool sync_frame_drop, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	bool do_recovery = true;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If the wait list is empty and we encounter a "silent"
		 * frame drop, then the settings applied on the previous
		 * frame did not take effect at the next frame boundary and
		 * are expected to latch a frame later - no need to recover.
		 * If it is an out-of-sync drop, use the pending request.
		 */
		if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);
		else
			do_recovery = false;
	}

	/* Neither the wait nor the pending list has a request to recover on */
	if (!do_recovery) {
		CAM_WARN(CAM_ISP,
			"No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
			ctx_isp->req_info.last_bufdone_req_id);
		goto end;
	}

	if (!req) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		if (req->request_id != ctx_isp->last_applied_req_id)
			CAM_WARN(CAM_ISP,
				"Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx->ctx_id, ctx->link_hdl);
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	/*
	 * Treat this as a bubble; after recovery, restart from the
	 * appropriate sub-state. This blocks servicing any further apply
	 * calls from CRM.
	 */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	atomic_set(&ctx_isp->process_bubble, 1);
	ctx_isp->recovery_req_id = req->request_id;

	/* Wait for active requests to finish before issuing recovery */
	if (ctx_isp->active_req_cnt) {
		req_isp->bubble_detected = true;
		CAM_WARN(CAM_ISP,
			"Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
			ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
			ctx->ctx_id, ctx->link_hdl);
	} else {
		rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			ctx_isp->recovery_req_id, ctx_isp);
		if (rc) {
			/* Unable to do bubble recovery; reset back to normal */
			CAM_WARN(CAM_ISP,
				"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			__cam_isp_context_reset_internal_recovery_params(ctx_isp);
			goto end;
		}

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
	}
end:
	return rc;
}
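
/*
 * Secondary IRQ events (currently only honoured for AEB contexts):
 * a slave RDI SOF seen while in an applied substate is fatal, while a
 * CSID CAMIF EPOCH in an applied substate or a sensor-sync frame drop
 * triggers internal recovery when it is enabled.
 */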
static int __cam_isp_ctx_handle_secondary_events(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	bool recover = false, sync_frame_drop = false;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_secondary_event_data *sec_evt_data =
		(struct cam_isp_hw_secondary_event_data *)evt_data;

	/* The current scheme handles these events only for custom AEB */
	if (!ctx_isp->aeb_enabled) {
		CAM_WARN(CAM_ISP,
			"Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	/*
	 * In case of custom AEB, ensure the first exposure frame has not
	 * moved forward with its settings without the second/third
	 * exposure frame coming in. Also track bubbles: with system
	 * delays it is possible for the IFE settings to not be written
	 * to HW on a given frame. If either scenario occurs, flag it as
	 * an error and recover.
	 */
	switch (sec_evt_data->evt_type) {
	case CAM_ISP_HW_SEC_EVENT_SOF:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
			ctx_isp->last_applied_req_id);

		/* Slave RDI's frame starting post IFE EPOCH - Fatal */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
			__cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
		break;
	case CAM_ISP_HW_SEC_EVENT_EPOCH:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
			ctx_isp->last_applied_req_id);

		/*
		 * Master RDI frame dropped in CSID: due to programming delay
		 * there is no RUP/AUP. On such occasions use the CSID CAMIF
		 * EPOCH for bubble detection, flag on detection and perform
		 * the necessary bubble recovery.
		 */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
			recover = true;
			CAM_WARN(CAM_ISP,
				"Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
				ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		}
		break;
	case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
			ctx_isp->last_applied_req_id);

		/* Avoid recovery loop if frame is dropped at stream on */
		if (!ctx_isp->frame_id) {
			CAM_ERR(CAM_ISP,
				"Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
				ctx->ctx_id, ctx->link_hdl,
				ctx_isp->frame_id, ctx_isp->last_applied_req_id);
			rc = -EPERM;
			break;
		}

		recover = true;
		sync_frame_drop = true;
		CAM_WARN(CAM_ISP,
			"Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		break;
	default:
		break;
	}

	if (recover && ctx_isp->do_internal_recovery)
		rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);
end:
	return rc;
}
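
/*
 * Per-substate IRQ dispatch tables. Judging by the handler names, each
 * .irq_ops[] slot maps to one HW event type, in order: error, SOF,
 * reg-update, epoch, EOF, buf-done and (where supported) secondary
 * events; a NULL entry means the event is ignored in that substate.
 */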
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_fs2_reg_upd_in_sof,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_fs2_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
	},
	/* Bubble Applied */
	{
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
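
/*
 * Core apply path: validates the request at the head of the pending
 * list against the apply call, rejects applies during bubble handling
 * or congestion (two or more requests outstanding), then programs the
 * HW and moves the request to the wait list on success.
 */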
static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *active_req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_ctx_req *active_req_isp;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_config_args cfg = {0};

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	if (apply->re_apply &&
		(apply->request_id <= ctx_isp->last_applied_req_id)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Trying to reapply the same request %llu again",
			ctx->ctx_id,
			apply->request_id);
		return 0;
	}

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d No available request for Apply id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has an issue, requests can queue up in it.
	 * In that case, reject additional requests: the maximum number
	 * of requests allowed to be outstanding is 2.
	 */
	if (atomic_read(&ctx_isp->process_bubble)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
			ctx->ctx_id,
			apply->request_id);
		rc = -EAGAIN;
		goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip; if not, we are in
	 * the middle of error handling and must reject this apply.
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Invalid Request Id asking %llu existing %llu",
			ctx->ctx_id,
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
		req->request_id,
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
		ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (ctx_isp->active_req_cnt >= 2) {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
			req->request_id,
			ctx_isp->active_req_cnt,
			ctx->ctx_id);

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->active_req_list))
			active_req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
		else
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"WARNING: should not happen (cnt = %d) but active_list empty",
				ctx_isp->active_req_cnt);
		spin_unlock_bh(&ctx->lock);

		if (active_req) {
			active_req_isp =
				(struct cam_isp_ctx_req *) active_req->req_priv;
			__cam_isp_ctx_handle_buf_done_fail_log(
				active_req->request_id, active_req_isp,
				ctx_isp->isp_device_type);
		}
		rc = -EFAULT;
		goto end;
	}

	req_isp->bubble_report = apply->report_if_bubble;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;
	cfg.reapply_type = req_isp->reapply_type;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	atomic_set(&ctx_isp->apply_in_progress, 1);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		ctx_isp->last_applied_req_id = apply->request_id;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
			__cam_isp_ctx_substate_val_to_type(next_state),
			ctx_isp->last_applied_req_id);
		spin_unlock_bh(&ctx->lock);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_APPLY, req);
	} else if (rc == -EALREADY) {
		spin_lock_bh(&ctx->lock);
		req_isp->bubble_detected = true;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		spin_unlock_bh(&ctx->lock);
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Cannot apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}

	atomic_set(&ctx_isp->apply_in_progress, 0);
end:
	return rc;
}
static int __cam_isp_ctx_apply_req_in_sof(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_epoch(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_bubble(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_default_req_settings(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type =
		CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to apply default settings rc %d", rc);
	else
		CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);

	return rc;
}
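
/*
 * Dump helper: writes an "ISP_OUT_FENCE" header followed by
 * (resource_handle, sync_id) pairs for every output fence of the
 * request, then chains into cam_isp_ctx_dump_req for the remaining
 * request state.
 */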
static int __cam_isp_ctx_dump_req_info(
	struct cam_context *ctx,
	struct cam_ctx_request *req,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset)
{
	int i, rc;
	uint8_t *dst;
	int32_t *addr, *start;
	uint32_t min_len;
	size_t remain_len;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_isp_context_dump_header *hdr;

	if (!req || !ctx || !offset || !cpu_addr || !buf_len) {
		CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK %zu",
			req, ctx, offset, buf_len);
		return -EINVAL;
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;

	if (buf_len <= *offset) {
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			buf_len, *offset);
		return -ENOSPC;
	}

	remain_len = buf_len - *offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
		req_isp->num_fence_map_out *
		sizeof(int32_t));
	if (remain_len < min_len) {
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	dst = (uint8_t *)cpu_addr + *offset;
	hdr = (struct cam_isp_context_dump_header *)dst;
	hdr->word_size = sizeof(int32_t);
	scnprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
		"ISP_OUT_FENCE:");
	addr = (int32_t *)(dst + sizeof(struct cam_isp_context_dump_header));
	start = addr;
	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		*addr++ = req_isp->fence_map_out[i].resource_handle;
		*addr++ = req_isp->fence_map_out[i].sync_id;
	}
	hdr->size = hdr->word_size * (addr - start);
	*offset += hdr->size + sizeof(struct cam_isp_context_dump_header);
	rc = cam_isp_ctx_dump_req(req_isp, cpu_addr, buf_len,
		offset, true);
	return rc;
}
static int __cam_isp_ctx_dump_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_dump_info *dump_info)
{
	int rc = 0;
	bool dump_only_event_record = false;
	size_t buf_len;
	size_t remain_len;
	uint8_t *dst;
	ktime_t cur_time;
	uint32_t min_len;
	uint64_t diff;
	uint64_t *addr, *start;
	uintptr_t cpu_addr;
	struct timespec64 ts;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_ctx_request *req_temp;
	struct cam_hw_dump_args dump_args;
	struct cam_isp_context_dump_header *hdr;

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	spin_unlock_bh(&ctx->lock);
	return rc;
hw_dump:
	rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
		&cpu_addr, &buf_len);
	if (rc) {
		CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
			dump_info->buf_handle, rc);
		spin_unlock_bh(&ctx->lock);
		return rc;
	}
	if (buf_len <= dump_info->offset) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			buf_len, dump_info->offset);
		return -ENOSPC;
	}

	remain_len = buf_len - dump_info->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

	if (remain_len < min_len) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	cur_time = ktime_get();
	/*
	 * Elapsed time since the request was applied; note the argument
	 * order (current time first) so the delta cannot go negative.
	 */
	diff = ktime_us_delta(cur_time,
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
	if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
		CAM_INFO(CAM_ISP, "req %lld found no error",
			req->request_id);
		dump_only_event_record = true;
	}

	dst = (uint8_t *)cpu_addr + dump_info->offset;
	hdr = (struct cam_isp_context_dump_header *)dst;
	scnprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
		"ISP_CTX_DUMP:");
	hdr->word_size = sizeof(uint64_t);
	addr = (uint64_t *)(dst +
		sizeof(struct cam_isp_context_dump_header));
	start = addr;
	*addr++ = req->request_id;
	ts = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;
	ts = ktime_to_timespec64(cur_time);
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;
	hdr->size = hdr->word_size * (addr - start);
	dump_info->offset += hdr->size +
		sizeof(struct cam_isp_context_dump_header);

	rc = __cam_isp_ctx_dump_event_record(ctx_isp, cpu_addr,
		buf_len, &dump_info->offset);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump event fail %lld",
			req->request_id);
		spin_unlock_bh(&ctx->lock);
		return rc;
	}
	if (dump_only_event_record) {
		spin_unlock_bh(&ctx->lock);
		return rc;
	}
	rc = __cam_isp_ctx_dump_req_info(ctx, req, cpu_addr,
		buf_len, &dump_info->offset);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump Req info fail %lld",
			req->request_id);
		spin_unlock_bh(&ctx->lock);
		return rc;
	}
	spin_unlock_bh(&ctx->lock);

	if (ctx->hw_mgr_intf->hw_dump) {
		dump_args.offset = dump_info->offset;
		dump_args.request_id = dump_info->req_id;
		dump_args.buf_handle = dump_info->buf_handle;
		dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_dump(
			ctx->hw_mgr_intf->hw_mgr_priv,
			&dump_args);
		dump_info->offset = dump_args.offset;
	}
	return rc;
}
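
/*
 * Flush helper shared by the pending/wait/active lists: detaches the
 * targeted requests (a single id for CANCEL_REQ, everything otherwise),
 * signals their output fences with CAM_SYNC_STATE_SIGNALED_CANCEL and
 * returns them to the free list.
 */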
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc, tmp = 0;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp;
	struct list_head flush_list;
	struct cam_isp_context *ctx_isp = NULL;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_ISP, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_INFO(CAM_ISP,
				"no request to cancel (last applied:%lld cancel:%lld)",
				ctx_isp->last_applied_req_id, flush_req->req_id);
			return -EINVAL;
		} else
			return 0;
	}

	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				__cam_isp_ctx_update_state_monitor_array(
					ctx_isp,
					CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
					req->request_id);
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
	}

	if (list_empty(&flush_list)) {
		/*
		 * The request may never have been sent to the KMD, since
		 * the UMD already skipped it in the CSL layer.
		 */
		CAM_INFO(CAM_ISP,
			"flush list is empty, flush type %d for req %llu",
			flush_req->type, flush_req->req_id);
		return 0;
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
					req->request_id,
					req_isp->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_FLUSH);
				if (rc) {
					tmp = req_isp->fence_map_out[i].sync_id;
					CAM_ERR_RATE_LIMIT(CAM_ISP,
						"signal fence %d failed", tmp);
				}
				req_isp->fence_map_out[i].sync_id = -1;
			}
		}
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	return 0;
}
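
/*
 * Flush handler for the top-level ACTIVATED state: always flushes the
 * pending list; for FLUSH_TYPE_ALL it also halts the substate machine,
 * stops and resets the hardware, and drains the wait and active lists.
 */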
static int __cam_isp_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_hw_reset_args reset_args;
	struct cam_req_mgr_timer_notify timer;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "Flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		if (ctx->state <= CAM_CTX_READY) {
			ctx->state = CAM_CTX_ACQUIRED;
			goto end;
		}

		spin_lock_bh(&ctx->lock);
		ctx->state = CAM_CTX_FLUSHED;
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
		spin_unlock_bh(&ctx->lock);

		CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
			flush_req->req_id, ctx->ctx_id);
		ctx->last_flush_req = flush_req->req_id;

		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx);

		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = true;
		stop_isp.internal_trigger = false;
		stop_args.args = (void *)&stop_isp;
		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
				rc);

		CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
		CAM_DBG(CAM_ISP, "Flush wait and active lists");

		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
			timer.link_hdl = ctx->link_hdl;
			timer.dev_hdl = ctx->dev_hdl;
			timer.state = false;
			ctx->ctx_crm_intf->notify_timer(&timer);
		}

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->wait_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
				flush_req);
		if (!list_empty(&ctx->active_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
				flush_req);
		ctx_isp->active_req_cnt = 0;
		spin_unlock_bh(&ctx->lock);

		reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
			&reset_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);

		ctx_isp->init_received = false;
	}

end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	return rc;
}
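
/*
 * Flush handler for the READY state: only the pending list can hold
 * requests here, so flush it and drop back to ACQUIRED once it empties.
 */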
static int __cam_isp_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in the pending req list, change state to acquired */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}
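
/*
 * Per-substate CRM ops for a regular activated context. Apply is only
 * legal in SOF, EPOCH and BUBBLE; the remaining substates intentionally
 * leave crm_ops empty.
 */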
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};

static struct cam_ctx_ops
	cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
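
/*
 * Handlers below implement the RDI-only session substate machine,
 * where SOF and reg update events (rather than EPOCH) drive the
 * request state transitions.
 */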
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	/*
	 * Notify reqmgr with the sof signal. Note, due to scheduling delay
	 * we can run into a situation where two active requests are already
	 * in the active queue while we try to do the notification. In that
	 * case, we need to skip the current notification. This helps the
	 * state machine to catch up the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		/*
		 * It's possible for rup done to be processed before
		 * SOF, check for first active request shutter here
		 */
		if (!list_empty(&ctx->active_req_list)) {
			struct cam_ctx_request *req = NULL;

			req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
			}
		}
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
	}

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return rc;
}

static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
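
/*
 * SOF arrived while an apply is outstanding in the bubble path: report
 * the previously captured time stamp first, mark the waiting request
 * as a bubble and move it to the active list, then drop to the BUBBLE
 * substate.
 */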
static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	/*
	 * SOF in the bubble applied state means the reg update was not
	 * received. Before incrementing the frame id and overriding the
	 * time stamp value, send the previous SOF time stamp that was
	 * captured in the applied state.
	 */
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no waiting request, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as an empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/* change the state to bubble, as reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
end:
	return 0;
}
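
/*
 * SOF in the BUBBLE substate: if a bubble re-apply is in flight, use
 * the last CDM-done request id to decide whether buf done is merely
 * delayed or the request must be re-queued for re-apply; otherwise
 * signal all active requests with error and free them.
 */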
static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t request_id = 0;
	uint64_t last_cdm_done_req = 0;
	int rc = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP, "No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			return -EINVAL;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %d",
				ctx_isp->frame_id,
				ctx_isp->sof_timestamp_val,
				ctx->ctx_id);
			goto end;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %d ctx_id: %d",
				last_cdm_done_req, ctx->ctx_id);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				goto end;
			} else {
				CAM_WARN(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
			goto end;
		}
	}

	/*
	 * Signal all active requests with error and move them to the
	 * free list
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
	}

end:
	/* notify reqmgr with sof signal */
	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	/*
	 * This is an idle frame without any applied request id; send
	 * the request id as zero
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/*
	 * Defensive check: the active list should not be empty when a reg
	 * update lands in the bubble state, but avoid dereferencing a
	 * bogus entry if it is.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Reg upd in bubble with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %llu",
		req->request_id);

	return 0;
}
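
/*
 * Reg update ack while a bubble apply is outstanding: promote the
 * waiting request to the active list (or free it if it carries no IO
 * config) and move the substate machine to EPOCH.
 */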
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	/* notify reqmgr with sof signal */
	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto error;
	}

	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	request_id =
		(req_isp->hw_update_data.packet_opcode_type ==
			CAM_ISP_PACKET_INIT_DEV) ? 0 : req->request_id;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_ISP,
			"move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
		/* if packet has buffers, set correct request id */
		request_id = req->request_id;
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active req %lld to free list(cnt=%d)",
			req->request_id, ctx_isp->active_req_cnt);
	}

	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	if (request_id)
		ctx_isp->reported_req_id = request_id;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, req);
	return 0;

error:
	/* Send SOF event as an idle frame */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, NULL);

	/*
	 * There is no request in the pending list, move the sub state
	 * machine to the SOF sub state
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}
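
/*
 * IRQ dispatch table for RDI-only sessions, indexed first by activated
 * substate and then by ISP hw event type.
 */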
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_rdi_only_activated_state_machine_irq
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			__cam_isp_ctx_reg_upd_in_sof,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_applied_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			NULL,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED, i.e. PRE_BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
	},
	/* HALT */
	{
	},
};

static int __cam_isp_ctx_rdi_only_apply_req_top_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Apply failed in Substate[%s], rc %d",
			ctx->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);
	return rc;
}

static struct cam_ctx_ops
	cam_isp_ctx_rdi_only_activated_state_machine
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED (PRE_BUBBLE) */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
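
/*
 * Userspace flush entry point; only offline contexts support the
 * CAM_FLUSH_DEV ioctl directly.
 */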
static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (!ctx_isp->offline_context) {
		CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
		return -EINVAL;
	}

	flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ?
		CAM_REQ_MGR_FLUSH_TYPE_ALL : CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
	flush_req.req_id = cmd->req_id;

	CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%llu)",
		flush_req.type, flush_req.req_id);

	switch (ctx->state) {
	case CAM_CTX_ACQUIRED:
	case CAM_CTX_ACTIVATED:
		rc = __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
		break;
	case CAM_CTX_READY:
		rc = __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
		break;
	default:
		CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
		return -EINVAL;
	}

	/*
	 * The workq flush below was previously dead code because every
	 * switch case returned; capture rc above instead so a full flush
	 * also drains the offline workq.
	 */
	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
		cam_req_mgr_workq_flush(ctx_isp->workq);

	return rc;
}

static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
{
	kfree(ctx->out_map_entries);
	kfree(ctx->in_map_entries);
	kfree(ctx->hw_update_entry);
	ctx->out_map_entries = NULL;
	ctx->in_map_entries = NULL;
	ctx->hw_update_entry = NULL;
	ctx->max_out_map_entries = 0;
	ctx->max_in_map_entries = 0;
	ctx->max_hw_update_entries = 0;
}

static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;
	int i;

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	} else {
		CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
	}

	ctx->last_flush_req = 0;
	ctx_isp->custom_enabled = false;
	ctx_isp->use_frame_header_ts = false;
	ctx_isp->use_default_apply = false;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->support_consumed_addr = false;
	ctx_isp->aeb_enabled = false;
	ctx_isp->do_internal_recovery = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to help debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_WARN(CAM_ISP, "Active list is not empty");

	/* Flush all the pending requests */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);
	cam_req_mgr_workq_destroy(&ctx_isp->workq);
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}

/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "releasing hw");
		__cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
	}

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	ctx->session_hdl = -1;
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->offline_context = false;
	ctx_isp->rdi_only_context = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->v4l2_event_sub_ids = 0;

	atomic64_set(&ctx_isp->state_monitor_head, -1);
	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to help debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_ISP, "Active list is not empty");

	/* Flush all the pending requests */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);

	ctx->state = CAM_CTX_AVAILABLE;
	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}
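
/*
 * Packet configuration in the top-level state: pulls a free request,
 * asks the HW manager to prepare the update, takes a reference on every
 * output fence, and queues the request to CRM (or applies it directly
 * for offline contexts).
 */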
static int __cam_isp_ctx_config_dev_in_top_state(
	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet;
	size_t remain_len = 0;
	struct cam_hw_prepare_update_args cfg = {0};
	struct cam_req_mgr_add_request add_req;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint32_t packet_opcode = 0;

	CAM_DBG(CAM_ISP, "get free request object");

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_ISP, "No more request obj free");
		return -ENOMEM;
	}

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet);
	if (IS_ERR(packet)) {
		rc = PTR_ERR(packet);
		goto free_req;
	}

	/* Query the packet opcode */
	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
	isp_hw_cmd_args.cmd_data = (void *)packet;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_req;
	}

	packet_opcode = isp_hw_cmd_args.u.packet_op_code;
	if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_INFO(CAM_ISP,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EBADR;
		goto free_req;
	}

	cfg.packet = packet;
	cfg.remain_len = remain_len;
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.max_hw_update_entries = ctx->max_hw_update_entries;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.max_out_map_entries = ctx->max_out_map_entries;
	cfg.max_in_map_entries = ctx->max_in_map_entries;
	cfg.out_map_entries = req_isp->fence_map_out;
	cfg.in_map_entries = req_isp->fence_map_in;
	cfg.priv = &req_isp->hw_update_data;
	cfg.pf_data = &(req->pf_data);
	cfg.num_out_map_entries = 0;
	cfg.num_in_map_entries = 0;
	memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_isp->num_cfg = cfg.num_hw_update_entries;
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;
	req_isp->hw_update_data.packet = packet;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
				req_isp->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_ISP,
		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d",
		packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
		req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	if (req_isp->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
			ctx_isp->init_received = true;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
				ctx->state);
		}
	} else {
		if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
				req->request_id, ctx->state);
			goto put_ref;
		}

		if (ctx_isp->offline_context) {
			__cam_isp_ctx_enqueue_request_in_order(ctx, req);
		} else if (ctx->ctx_crm_intf->add_req) {
			memset(&add_req, 0, sizeof(add_req));
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
					req->request_id);
			} else {
				__cam_isp_ctx_enqueue_request_in_order(
					ctx, req);
			}
		} else {
			CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
			rc = -ENODEV;
		}
	}
	if (rc)
		goto put_ref;

	CAM_DBG(CAM_REQ,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch)) {
		__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);
	}

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
				req_isp->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}
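
/*
 * Size the per-context hw update and fence map tables from the acquire
 * parameters, then carve per-request slices out of the shared arrays
 * for every request on the free list.
 */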
static int __cam_isp_ctx_allocate_mem_hw_entries(
	struct cam_context *ctx,
	struct cam_hw_acquire_args *param)
{
	int rc = 0;
	uint32_t max_res = 0;
	uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
	struct cam_ctx_request *req;
	struct cam_ctx_request *temp_req;
	struct cam_isp_ctx_req *req_isp;
	size_t num_entries = 0;

	if (!param->op_params.param_list[0])
		max_res = CAM_ISP_CTX_RES_MAX;
	else {
		max_res = param->op_params.param_list[0];
		if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
			max_res += param->op_params.param_list[1];
			max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
		}
	}

	ctx->max_in_map_entries = max_res;
	ctx->max_out_map_entries = max_res;
	ctx->max_hw_update_entries = max_hw_upd_entries;

	CAM_DBG(CAM_ISP,
		"Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
		max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN));

	num_entries = ctx->max_hw_update_entries * CAM_ISP_CTX_REQ_MAX;
	ctx->hw_update_entry = kcalloc(num_entries,
		sizeof(struct cam_hw_update_entry),
		GFP_KERNEL);
	if (!ctx->hw_update_entry) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
		return -ENOMEM;
	}

	num_entries = ctx->max_in_map_entries * CAM_ISP_CTX_REQ_MAX;
	ctx->in_map_entries = kcalloc(num_entries,
		sizeof(struct cam_hw_fence_map_entry),
		GFP_KERNEL);
	if (!ctx->in_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	num_entries = ctx->max_out_map_entries * CAM_ISP_CTX_REQ_MAX;
	ctx->out_map_entries = kcalloc(num_entries,
		sizeof(struct cam_hw_fence_map_entry),
		GFP_KERNEL);
	if (!ctx->out_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	list_for_each_entry_safe(req, temp_req,
		&ctx->free_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		req_isp->cfg =
			&ctx->hw_update_entry[req->index * ctx->max_hw_update_entries];
		req_isp->fence_map_in =
			&ctx->in_map_entries[req->index * ctx->max_in_map_entries];
		req_isp->fence_map_out =
			&ctx->out_map_entries[req->index * ctx->max_out_map_entries];
	}

	return rc;

end:
	__cam_isp_ctx_free_mem_hw_entries(ctx);

	return rc;
}
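
/*
 * Legacy acquire path (CAM_ACQUIRE_DEV): copies the resource list from
 * user space, reserves hardware through the HW manager, and picks the
 * substate machine that matches the acquired context type.
 */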
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_acquire_args param;
	struct cam_isp_resource *isp_res = NULL;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed();

	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
		ctx_isp->split_acquire = true;
		CAM_DBG(CAM_ISP, "Acquire dev handle");
		goto get_dev_handle;
	}

	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP, "Too many resources in the acquire");
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	isp_res = kzalloc(
		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
	if (!isp_res) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy %d resources from user",
		cmd->num_resources);

	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
		sizeof(*isp_res)*cmd->num_resources)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = cmd->num_resources;
	param.acquire_info = (uintptr_t) isp_res;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	/* Query whether the context has rdi only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * this context has rdi only resources; assign the rdi only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx_isp->split_acquire = false;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	kfree(isp_res);
	isp_res = NULL;

get_dev_handle:
	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags. so false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;
	req_hdl_param.dev_id = CAM_ISP;

	CAM_DBG(CAM_ISP, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_ISP, "Can not create device handle");
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
		cmd->session_handle, cmd->num_resources, ctx->ctx_id);

	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx_isp->hw_acquired)
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(isp_res);
end:
	return rc;
}

static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
	void *args)
{
	int rc = 0;
	int i;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		/* was falling through with rc == 0; report the error */
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
		&param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);

	/* Query whether the context has rdi only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * this context has rdi only resources; assign the rdi only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}

static void cam_req_mgr_process_workq_offline_ife_worker(struct work_struct *w)
{
	cam_req_mgr_process_workq(w);
}
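
/*
 * acquire_hw v2 additionally sets feature flags (custom hw, frame-header
 * time stamps, default apply, AEB) from the acquire op_flags, reports
 * the acquired hw ids and paths back to user space, and creates a
 * dedicated workq for offline contexts.
 */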
  4996. static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
  4997. void *args)
  4998. {
  4999. int rc = 0, i, j;
  5000. struct cam_acquire_hw_cmd_v2 *cmd =
  5001. (struct cam_acquire_hw_cmd_v2 *)args;
  5002. struct cam_hw_acquire_args param;
  5003. struct cam_hw_release_args release;
  5004. struct cam_isp_context *ctx_isp =
  5005. (struct cam_isp_context *) ctx->ctx_priv;
  5006. struct cam_hw_cmd_args hw_cmd_args;
  5007. struct cam_isp_hw_cmd_args isp_hw_cmd_args;
  5008. struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;
  5009. if (!ctx->hw_mgr_intf) {
  5010. CAM_ERR(CAM_ISP, "HW interface is not ready");
  5011. rc = -EFAULT;
  5012. goto end;
  5013. }
  5014. CAM_DBG(CAM_ISP,
  5015. "session_hdl 0x%x, hdl type %d, res %lld",
  5016. cmd->session_handle, cmd->handle_type, cmd->resource_hdl);
  5017. /* for now we only support user pointer */
  5018. if (cmd->handle_type != 1) {
  5019. CAM_ERR(CAM_ISP, "Only user pointer is supported");
  5020. rc = -EINVAL;
  5021. goto end;
  5022. }
  5023. if (cmd->data_size < sizeof(*acquire_hw_info)) {
  5024. CAM_ERR(CAM_ISP, "data_size is not a valid value");
  5025. goto end;
  5026. }
  5027. acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
  5028. if (!acquire_hw_info) {
  5029. rc = -ENOMEM;
  5030. goto end;
  5031. }
  5032. CAM_DBG(CAM_ISP, "start copy resources from user");
  5033. if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
  5034. cmd->data_size)) {
  5035. rc = -EFAULT;
  5036. goto free_res;
  5037. }
  5038. memset(&param, 0, sizeof(param));
  5039. param.context_data = ctx;
  5040. param.event_cb = ctx->irq_cb_intf;
  5041. param.num_acq = CAM_API_COMPAT_CONSTANT;
  5042. param.acquire_info_size = cmd->data_size;
  5043. param.acquire_info = (uint64_t) acquire_hw_info;
  5044. param.mini_dump_cb = __cam_isp_ctx_minidump_cb;
  5045. /* call HW manager to reserve the resource */
  5046. rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
  5047. &param);
  5048. if (rc != 0) {
  5049. CAM_ERR(CAM_ISP, "Acquire device failed");
  5050. goto free_res;
  5051. }
	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_hw;
	}

	/*
	 * Set feature flags if applicable;
	 * custom hw is supported only on v2
	 */
	ctx_isp->custom_enabled =
		(param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
	ctx_isp->use_frame_header_ts =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
	ctx_isp->use_default_apply =
		(param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
	ctx_isp->aeb_enabled =
		(param.op_flags & CAM_IFE_CTX_AEB_EN);

	if ((ctx_isp->aeb_enabled) && (!isp_ctx_debug.disable_internal_recovery))
		ctx_isp->do_internal_recovery = true;

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (param.valid_acquired_hw) {
		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			cmd->hw_info.acquired_hw_id[i] =
				param.acquired_hw_id[i];

		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
				cmd->hw_info.acquired_hw_path[i][j] =
					param.acquired_hw_path[i][j];
	}
	cmd->hw_info.valid_acquired_hw =
		param.valid_acquired_hw;

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources; assign the
		 * RDI-only state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");

		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
		ctx_isp->offline_context = true;

		rc = cam_req_mgr_workq_create("offline_ife", 20,
			&ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
			cam_req_mgr_process_workq_offline_ife_worker);
		if (rc)
			CAM_ERR(CAM_ISP,
				"Failed to create workq for offline IFE rc:%d",
				rc);
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
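/*
 * Editor's note: an illustrative sketch, not part of the driver. The
 * op_flags assignments above store raw masked values in fields that are
 * used as booleans; if those fields were ever narrowed to a bool or a
 * bitfield, an explicit comparison avoids truncation surprises. The
 * helper name below is hypothetical.
 */
#if 0
static inline bool cam_isp_op_flag_is_set(uint32_t op_flags, uint32_t flag)
{
	/* Collapse the masked value to a strict 0/1 result */
	return (op_flags & flag) != 0;
}
#endif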
static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
	void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_ISP, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
	else if (api_version == 2)
		rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
	else
		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

	return rc;
}
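/*
 * Editor's note: an illustrative sketch under the assumption that the
 * acquire payload begins with its version word, which is what lets the
 * handler above peek at *(uint32_t *)args before dispatching to v1 or
 * v2. The struct below is hypothetical, not the real UAPI definition.
 */
#if 0
struct example_acquire_hw_head {
	uint32_t api_version;	/* inspected by the dispatcher above */
	/* version-specific payload follows */
};
#endif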
static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		return -EINVAL;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

	if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_start_stop_dev_cmd start_cmd;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		rc = -EINVAL;
		goto end;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (rc)
		goto end;

	if (!ctx_isp->init_received) {
		CAM_WARN(CAM_ISP,
			"Received update packet in flushed state, skip start");
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
		goto end;
	}

	start_cmd.dev_handle = cmd->dev_handle;
	start_cmd.session_handle = cmd->session_handle;
	rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to re-start HW after flush rc: %d", rc);
	else
		CAM_INFO(CAM_ISP,
			"Received init after flush. Re-start HW complete in ctx:%d",
			ctx->ctx_id);

end:
	CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
		ctx_isp->substate_activated);
	return rc;
}
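/*
 * Editor's note: an illustrative condensation of the flushed-state
 * config path above, i.e. "config, resume HW, then re-issue start".
 * The helper name is hypothetical; error logging is omitted.
 */
#if 0
static int example_flushed_restart(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	struct cam_start_stop_dev_cmd start_cmd = {
		.session_handle = cmd->session_handle,
		.dev_handle = cmd->dev_handle,
	};
	int rc;

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (rc)
		return rc;

	/* Resume first so the HW can accept the start that follows */
	rc = __cam_isp_ctx_link_resume(ctx);
	if (rc)
		return rc;

	return __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
}
#endif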
static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!link) {
		CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u",
			link, ctx->ctx_id);
		return -EINVAL;
	}

	if (!link->crm_cb) {
		CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u",
			link->crm_cb, ctx->ctx_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Enter.........");

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_isp->subscribe_event =
		CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
	ctx_isp->trigger_id = link->trigger_id;

	/* change state only if we had the init config */
	if (ctx_isp->init_received) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->trigger_id = -1;

	return rc;
}
static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	int rc = 0;

	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
	dev_info->trigger_on = true;

	return rc;
}
static inline void __cam_isp_context_reset_ctx_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	ctx_isp->frame_id = 0;
	ctx_isp->sof_timestamp_val = 0;
	ctx_isp->boot_timestamp = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->recovery_req_id = 0;
}
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_isp_start_args start_isp;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_ISP, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE;
	start_isp.hw_config.cdm_reset_before_apply = false;
	ctx_isp->last_applied_req_id = req->request_id;

	if (ctx->state == CAM_CTX_FLUSHED)
		start_isp.start_only = true;
	else
		start_isp.start_only = false;

	__cam_isp_context_reset_ctx_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED :
		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
		CAM_ISP_CTX_ACTIVATED_SOF;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * In case of CSID TPG we might receive SOF and RUP IRQs
	 * before hw_mgr_intf->hw_start has returned. So move
	 * req out of pending list before hw_start and add it
	 * back to pending list if hw_start fails.
	 */
	list_del_init(&req->list);

	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->free_req_list);
		atomic_set(&ctx_isp->rxd_epoch, 1);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to free list(cnt: %d) offline ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to wait list(cnt: %d) ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx_isp->offline_context);
	}

	/*
	 * This is the only place the state changes before calling into
	 * the HW: the hardware tasklet runs at higher priority, so IRQ
	 * handling can begin before hw_start returns.
	 */
	ctx->state = CAM_CTX_ACTIVATED;
	trace_cam_context_state("ISP", ctx);
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		/* HW failure; the user needs to clean up the resources */
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		if ((rc == -ETIMEDOUT) &&
			(isp_ctx_debug.enable_cdm_cmd_buff_dump))
			rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

		trace_cam_context_state("ISP", ctx);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		goto end;
	}
	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
	return rc;
}
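/*
 * Editor's note: the nested ternary above selects the initial substate.
 * Spelled out as a hypothetical helper for readability; the constants
 * are the driver's own.
 */
#if 0
static uint32_t example_initial_substate(bool rdi_only_context,
	uint32_t num_fence_map_out)
{
	if (rdi_only_context)
		return CAM_ISP_CTX_ACTIVATED_APPLIED;

	/* Requests with output fences wait for EPOCH; otherwise SOF */
	return num_fence_map_out ? CAM_ISP_CTX_ACTIVATED_EPOCH :
		CAM_ISP_CTX_ACTIVATED_SOF;
}
#endif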
static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_hw_stop_args stop;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;

	/* Mask off all the incoming hardware events */
	spin_lock_bh(&ctx->lock);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	spin_unlock_bh(&ctx->lock);

	/* stop hw first */
	if (ctx_isp->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = false;
		stop_isp.internal_trigger = false;
		stop.args = (void *) &stop_isp;
		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_stop) {
		struct cam_req_mgr_notify_stop notify;

		notify.link_hdl = ctx->link_hdl;
		CAM_DBG(CAM_ISP,
			"Notify CRM about device stop ctx %u link 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		ctx->ctx_crm_intf->notify_stop(&notify);
	} else if (!ctx_isp->offline_context)
		CAM_ERR(CAM_ISP, "cb not present");

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
	}

	return rc;
}
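/*
 * Editor's note: an illustrative refactor, not applied. The three
 * while-loops above are identical except for the source list and the
 * log tag; a hypothetical helper that drains one list:
 */
#if 0
static void example_flush_req_list(struct cam_context *ctx,
	struct list_head *req_list, const char *tag)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	uint32_t i;

	while (!list_empty(req_list)) {
		req = list_first_entry(req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in %s list. fence num %d",
			tag, req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1)
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
		list_add_tail(&req->list, &ctx->free_req_list);
	}
}
#endif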
static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
	ctx_isp->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}

static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);

	return rc;
}
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_handle_sof_freeze_evt(
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
	isp_hw_cmd_args.u.sof_irq_enable = 1;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
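/*
 * Editor's note: an illustrative refactor, not applied. The pause,
 * resume and SOF-freeze handlers above differ only in the internal
 * command they issue; a hypothetical common helper:
 */
#if 0
static int example_send_internal_hw_cmd(struct cam_context *ctx,
	uint32_t isp_cmd_type)
{
	struct cam_hw_cmd_args hw_cmd_args = {0};
	struct cam_isp_hw_cmd_args isp_hw_cmd_args = {0};

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = isp_cmd_type;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

	return ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
}
#endif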
static int __cam_isp_ctx_reset_and_recover(
	bool skip_resume, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_isp_start_args start_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	spin_lock(&ctx->lock);
	if (ctx_isp->active_req_cnt) {
		spin_unlock(&ctx->lock);
		CAM_WARN(CAM_ISP,
			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
			ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		goto end;
	}

	if (ctx->state != CAM_CTX_ACTIVATED) {
		spin_unlock(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
			ctx->state, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		rc = -EINVAL;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* Cannot start with no request */
		spin_unlock(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}
	spin_unlock(&ctx->lock);

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP,
			"Invalid hw context pointer ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	req_isp->bubble_detected = false;

	CAM_INFO(CAM_ISP,
		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);

	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
	stop_isp.stop_only = true;
	stop_isp.internal_trigger = true;
	stop_args.args = (void *)&stop_isp;
	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
		&stop_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
			rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	/* This API also allows streaming off without resuming, for fatal errors */
	if (skip_resume) {
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		CAM_INFO(CAM_ISP,
			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
			ctx->ctx_id, ctx_isp->last_applied_req_id,
			ctx_isp->recovery_req_id, ctx->link_hdl);
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.start_only = true;

	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;

	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	/* IQ applied for this request, on next trigger skip IQ cfg */
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	CAM_DBG(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	int rc = 0;

	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
		/* No handling */
		break;
	case CAM_REQ_MGR_LINK_EVT_PAUSE:
		rc = __cam_isp_ctx_link_pause(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME:
		rc = __cam_isp_ctx_link_resume(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
		rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_STALLED:
		if (ctx->state == CAM_CTX_ACTIVATED)
			rc = __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
		break;
	default:
		CAM_WARN(CAM_ISP,
			"Unsupported event type: 0x%x on ctx: %u",
			link_evt_data->evt_type, ctx->ctx_id);
		rc = -EINVAL;
		break;
	}

	return rc;
}
static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_ISP,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);

	return rc;
}
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl);
	CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), apply->request_id);
	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.apply_req) {
		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply failed in active Substate[%s] rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_default_settings(
	struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->use_default_apply)
		return 0;

	if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
		CAM_WARN(CAM_ISP,
			"Trigger: %u not subscribed for: %u",
			apply->trigger_point, ctx_isp->subscribe_event);
		return 0;
	}

	/* Allow apply default settings for IFE only at SOF */
	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
		return 0;

	if ((ctx_isp->aeb_enabled) && (atomic_read(&ctx_isp->internal_recovery_set)))
		return __cam_isp_ctx_reset_and_recover(false, ctx);

	CAM_DBG(CAM_ISP,
		"Enter: apply req in Substate:%d request_id:%lld ctx:%u on link:0x%x",
		ctx_isp->substate_activated, apply->request_id,
		ctx->ctx_id, ctx->link_hdl);

	ctx_ops = &ctx_isp->substate_machine[
		ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.notify_frame_skip) {
		rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated substate %d",
			ctx_isp->substate_activated);
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply default failed in active substate %d rc %d",
			ctx_isp->substate_activated, rc);

	return rc;
}
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
	CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), evt_id);
	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
	if (irq_ops->irq_ops[evt_id]) {
		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CAM_DBG(CAM_ISP, "No handle function for Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		if (isp_ctx_debug.enable_state_monitor_dump)
			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
	}

	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s]",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	spin_unlock(&ctx->lock);

	return rc;
}
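/*
 * Editor's note: an illustrative sketch of the table-driven IRQ
 * dispatch used above, where evt_id indexes directly into the active
 * substate's irq_ops array. Names below are hypothetical.
 */
#if 0
typedef int (*example_irq_fn)(struct cam_isp_context *ctx_isp,
	void *evt_data);

static int example_irq_dispatch(example_irq_fn *ops, uint32_t evt_id,
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	/* A missing handler is a valid, logged-only case */
	if (!ops[evt_id])
		return 0;

	return ops[evt_id](ctx_isp, evt_data);
}
#endif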
/* top state machine */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.process_evt = __cam_isp_ctx_process_evt,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_activated,
			.apply_req = __cam_isp_ctx_apply_req,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_settings,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.process_evt = __cam_isp_ctx_process_evt,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.recovery_ops = cam_isp_context_hw_recovery,
	},
};
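/*
 * Editor's note: an illustrative sketch of how the cam_context core is
 * expected to consume this table, indexing by ctx->state and calling
 * the matching op. The helper name is hypothetical.
 */
#if 0
static int example_dispatch_config_dev(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	struct cam_ctx_ops *ops =
		&cam_isp_ctx_top_state_machine[ctx->state];

	if (!ops->ioctl_ops.config_dev)
		return -EINVAL;	/* op not valid in this state */

	return ops->ioctl_ops.config_dev(ctx, cmd);
}
#endif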
static int cam_isp_context_hw_recovery(void *priv, void *data)
{
	struct cam_context *ctx = priv;
	int rc = -EPERM;

	if (ctx->hw_mgr_intf->hw_recovery)
		rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data);
	else
		CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery");

	return rc;
}
static int cam_isp_context_dump_requests(void *data,
	struct cam_smmu_pf_info *pf_info)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
	struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
	struct cam_req_mgr_message req_msg = {0};
	struct cam_isp_context *ctx_isp;
	uint32_t resource_type = 0;
	bool mem_found = false, ctx_found = false, send_error = false;
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *)ctx->ctx_priv;

	if (!isp_ctx) {
		CAM_ERR(CAM_ISP, "Invalid isp ctx");
		return -EINVAL;
	}

	CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Active List: req_id : %lld ",
			req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	CAM_INFO(CAM_ISP, "Iterating over wait_list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Wait List: req_id : %lld ", req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	/*
	 * In certain scenarios we observe both an overflow and an SMMU
	 * pagefault for the same request. If the overflow is handled
	 * before the page fault, we also need to traverse the pending
	 * request list, because bubble recovery on any request moves that
	 * request and all subsequent ones back to the pending list while
	 * the overflow error is handled.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->pending_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Pending List: req_id : %lld ",
			req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	if (resource_type) {
		ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
		CAM_ERR(CAM_ISP,
			"Page fault on resource:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
			__cam_isp_resource_handle_id_to_type(
			ctx_isp->isp_device_type, resource_type),
			resource_type, ctx->ctx_id, ctx_isp->frame_id,
			ctx_isp->reported_req_id, ctx_isp->last_applied_req_id);
	}

	if (send_error) {
		CAM_INFO(CAM_ISP,
			"page fault notifying to umd ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
			ctx->ctx_id, ctx->session_hdl,
			ctx->dev_hdl, ctx->link_hdl);

		req_msg.session_hdl = ctx->session_hdl;
		req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
		req_msg.u.err_msg.error_type =
			CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT;
		req_msg.u.err_msg.link_hdl = ctx->link_hdl;
		req_msg.u.err_msg.request_id = 0;
		req_msg.u.err_msg.resource_size = 0x0;
		req_msg.u.err_msg.error_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR;

		if (cam_req_mgr_notify_message(&req_msg,
			V4L_EVENT_CAM_REQ_MGR_ERROR,
			V4L_EVENT_CAM_REQ_MGR_EVENT))
			CAM_ERR(CAM_ISP,
				"could not send page fault notification ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
				ctx->ctx_id, ctx->session_hdl,
				ctx->dev_hdl, ctx->link_hdl);
	}

	return rc;
}
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_isp_ctx", NULL);
	if (!dbgfileptr) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}
	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);

	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);

	debugfs_create_bool("disable_internal_recovery", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery);

	if (IS_ERR(dbgfileptr)) {
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_ISP, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
	}
end:
	return rc;
}
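/*
 * Editor's note: illustrative usage, assuming debugfs is mounted at
 * /sys/kernel/debug. The knobs registered above can then be toggled
 * from the shell:
 *
 *   echo 1 > /sys/kernel/debug/camera_isp_ctx/enable_state_monitor_dump
 *   echo 1 > /sys/kernel/debug/camera_isp_ctx/enable_cdm_cmd_buffer_dump
 *   echo Y > /sys/kernel/debug/camera_isp_ctx/disable_internal_recovery
 */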
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -1;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();
err:
	return rc;
}
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	debugfs_remove_recursive(isp_ctx_debug.dentry);
	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}
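/*
 * Editor's note: an illustrative caller sketch showing how init and
 * deinit are expected to pair, with the base cam_context owned by the
 * caller. Names and the device-type value below are hypothetical.
 */
#if 0
static struct cam_isp_context example_isp_ctx;
static struct cam_context example_base_ctx;

static int example_setup(struct cam_req_mgr_kmd_ops *crm_intf,
	struct cam_hw_mgr_intf *hw_intf, int img_iommu_hdl)
{
	int rc;

	rc = cam_isp_context_init(&example_isp_ctx, &example_base_ctx,
		crm_intf, hw_intf, 0 /* ctx_id */,
		0 /* isp_device_type, assumed IFE enum value */,
		img_iommu_hdl);
	if (rc)
		return rc;

	/* ... context is usable here ... */

	return cam_isp_context_deinit(&example_isp_ctx);
}
#endif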