cam_isp_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"
#include "cam_ife_hw_mgr.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
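
/*
 * Atomically advance a circular-buffer head counter and return the
 * corresponding slot index (new head value modulo max_entries) in *ret.
 */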
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
	max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	struct cam_smmu_pf_info *pf_info);

static int cam_isp_context_hw_recovery(void *priv, void *data);

static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);

static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp);

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id);

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type);

static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
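
/*
 * Record an ISP context event (submit/apply/epoch/rup/bufdone) into the
 * per-event circular record buffer along with the request id and a
 * ktime timestamp. SUBMIT and APPLY require a valid request pointer.
 */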
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}

	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}
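
/*
 * Dump helper invoked via cam_common_user_dump_helper(): serializes one
 * event record (request id, seconds, microseconds) into the user dump
 * buffer and returns the advanced write pointer.
 */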
static void *cam_isp_ctx_user_dump_events(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_isp_context_event_record *record;
	struct timespec64 ts;

	record = (struct cam_isp_context_event_record *)dump_struct;
	addr = (uint64_t *)addr_ptr;
	ts = ktime_to_timespec64(record->timestamp);

	*addr++ = record->req_id;
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;

	return addr;
}
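
/*
 * Walk every event-record ring from its oldest entry and emit the
 * entries into the common dump buffer, bounds-checking the remaining
 * buffer space before each ring is written.
 */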
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, j, rc = 0;
	int index;
	size_t remain_len;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t state_head;
	struct cam_isp_context_event_record *record;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}
		index = oldest_entry;

		if (dump_args->buf_len <= dump_args->offset) {
			CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
				dump_args->buf_len, dump_args->offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
			sizeof(uint64_t));
		remain_len = dump_args->buf_len - dump_args->offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}

		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];
			rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events,
				record, sizeof(uint64_t), "ISP_EVT_%s:",
				__cam_isp_evt_val_to_type(i));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"CAM_ISP_CONTEXT DUMP_EVENT_RECORD: Dump failed, rc: %d",
					rc);
				return rc;
			}
			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
	}
	return rc;
}
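
/*
 * Serialize one request into the mini-dump region: copy the request
 * bookkeeping fields, then append the fence map-out/map-in tables and
 * the packet IO configs, stopping early if the region would overflow.
 */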
static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
	uint8_t *start_addr, uint8_t *end_addr,
	unsigned long *bytes_updated)
{
	struct cam_isp_ctx_req_mini_dump *req_md;
	struct cam_buf_io_cfg *io_cfg;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet = NULL;
	unsigned long bytes_required = 0;

	bytes_required = sizeof(*req_md);
	*bytes_updated = 0;
	if (start_addr + bytes_required > end_addr)
		return;

	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_md->num_acked = req_isp->num_acked;
	req_md->num_deferred_acks = req_isp->num_deferred_acks;
	req_md->bubble_report = req_isp->bubble_report;
	req_md->bubble_detected = req_isp->bubble_detected;
	req_md->reapply_type = req_isp->reapply_type;
	req_md->request_id = req->request_id;
	*bytes_updated += bytes_required;

	if (req_isp->num_fence_map_out) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_out;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_out = (struct cam_hw_fence_map_entry *)
				((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
		req_md->num_fence_map_out = req_isp->num_fence_map_out;
		*bytes_updated += bytes_required;
	}

	if (req_isp->num_fence_map_in) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_in;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_in = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
		req_md->num_fence_map_in = req_isp->num_fence_map_in;
		*bytes_updated += bytes_required;
	}

	packet = req_isp->hw_update_data.packet;
	if (packet && packet->num_io_configs) {
		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
			packet->io_configs_offset / 4);
		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->io_cfg, io_cfg, bytes_required);
		*bytes_updated += bytes_required;
		req_md->num_io_cfg = packet->num_io_configs;
	}
}
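
/*
 * Mini-dump callback registered for this context: snapshot the ISP
 * context state (timestamps, ids, flags, event records) into the
 * supplied buffer, then append per-request mini dumps for the active,
 * wait and pending lists until the buffer is full.
 */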
static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
{
	struct cam_isp_ctx_mini_dump_info *md;
	struct cam_isp_context *ctx_isp;
	struct cam_context *ctx;
	struct cam_ctx_request *req, *req_temp;
	struct cam_hw_mini_dump_args *dump_args;
	uint8_t *start_addr;
	uint8_t *end_addr;
	unsigned long total_bytes = 0;
	unsigned long bytes_updated = 0;
	uint32_t i;

	if (!priv || !args) {
		CAM_ERR(CAM_ISP, "invalid params");
		return 0;
	}

	dump_args = (struct cam_hw_mini_dump_args *)args;
	if (dump_args->len < sizeof(*md)) {
		CAM_ERR(CAM_ISP,
			"Insufficient size received %lu required size: %zu",
			dump_args->len, sizeof(*md));
		return 0;
	}

	ctx = (struct cam_context *)priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	start_addr = (uint8_t *)dump_args->start_addr;
	end_addr = start_addr + dump_args->len;
	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;

	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
	md->boot_timestamp = ctx_isp->boot_timestamp;
	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
	md->init_timestamp = ctx_isp->init_timestamp;
	md->frame_id = ctx_isp->frame_id;
	md->reported_req_id = ctx_isp->reported_req_id;
	md->last_applied_req_id = ctx_isp->last_applied_req_id;
	md->last_bufdone_err_apply_req_id =
		ctx_isp->last_bufdone_err_apply_req_id;
	md->frame_id_meta = ctx_isp->frame_id_meta;
	md->substate_activated = ctx_isp->substate_activated;
	md->ctx_id = ctx->ctx_id;
	md->subscribe_event = ctx_isp->subscribe_event;
	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
	md->isp_device_type = ctx_isp->isp_device_type;
	md->active_req_cnt = ctx_isp->active_req_cnt;
	md->trigger_id = ctx_isp->trigger_id;
	md->rdi_only_context = ctx_isp->rdi_only_context;
	md->offline_context = ctx_isp->offline_context;
	md->hw_acquired = ctx_isp->hw_acquired;
	md->init_received = ctx_isp->init_received;
	md->split_acquire = ctx_isp->split_acquire;
	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
	md->support_consumed_addr = ctx_isp->support_consumed_addr;
	md->use_default_apply = ctx_isp->use_default_apply;
	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		memcpy(md->event_record[i], ctx_isp->event_record[i],
			sizeof(struct cam_isp_context_event_record) *
			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
	}

	total_bytes += sizeof(*md);
	if (start_addr + total_bytes >= end_addr)
		goto end;

	if (!list_empty(&ctx->active_req_list)) {
		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->active_list[md->active_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->wait_req_list)) {
		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->wait_list[md->wait_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}

	if (!list_empty(&ctx->pending_req_list)) {
		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->pending_list[md->pending_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes >= end_addr))
				goto end;
		}
	}
end:
	dump_args->bytes_written = total_bytes;
	return 0;
}
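
/*
 * Log a substate transition into the state-monitor ring buffer with the
 * current frame id, request id, trigger type and a millisecond
 * timestamp relative to context init.
 */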
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
		return "SEC_EVT_SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
		return "SEC_EVT_EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
		return "OUT_OF_SYNC_FRAME_DROP";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
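
/*
 * Print the state-monitor ring to the kernel log, oldest entry first,
 * so the substate history preceding an error can be inspected.
 */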
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}
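
/*
 * Dump helper invoked via cam_common_user_dump_helper(): serializes one
 * state-monitor entry (timestamp, frame id, request id) into the user
 * dump buffer and returns the advanced write pointer.
 */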
static void *cam_isp_ctx_user_dump_state_monitor_array_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_isp_context_state_monitor *evt = NULL;
	uint64_t *addr;

	evt = (struct cam_isp_context_state_monitor *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = evt->evt_time_stamp;
	*addr++ = evt->frame_id;
	*addr++ = evt->req_id;
	return addr;
}
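
/*
 * Emit the state-monitor ring into the common user dump buffer, oldest
 * entry first, after also printing it to the kernel log.
 */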
static int __cam_isp_ctx_user_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	int index;
	uint32_t oldest_entry;
	uint32_t num_entries;
	uint64_t state_head;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	state_head = 0;
	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return 0;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	__cam_isp_ctx_dump_state_monitor_array(ctx_isp);

	for (i = 0; i < num_entries; i++) {
		rc = cam_common_user_dump_helper(dump_args,
			cam_isp_ctx_user_dump_state_monitor_array_info,
			&ctx_isp->cam_isp_ctx_state_monitor[index],
			sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			__cam_isp_hw_evt_val_to_type(
			ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		if (rc) {
			CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: Event record dump failed, rc: %d", rc);
			return rc;
		}

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}

	return rc;
}

static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}

static const char *__cam_isp_ctx_crm_trigger_point_to_string(
	int trigger_point)
{
	switch (trigger_point) {
	case CAM_TRIGGER_POINT_SOF:
		return "SOF";
	case CAM_TRIGGER_POINT_EOF:
		return "EOF";
	default:
		return "Invalid";
	}
}
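
/*
 * Notify the CRM (camera request manager) of a SOF/EOF trigger point.
 * Skipped when the trigger is not subscribed for on this link or while
 * internal recovery is in progress.
 */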
static int __cam_isp_ctx_notify_trigger_util(
	int trigger_type, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_trigger_notify notify;

	/* Trigger type not supported, return */
	if (!(ctx_isp->subscribe_event & trigger_type)) {
		CAM_DBG(CAM_ISP,
			"%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id);
		return 0;
	}

	/* Skip CRM notify when recovery is in progress */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_DBG(CAM_ISP,
			"Internal recovery in progress skip notifying %s trigger point in ctx: %u on link: 0x%x",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx->ctx_id, ctx->link_hdl);
		return 0;
	}

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.frame_id = ctx_isp->frame_id;
	notify.trigger = trigger_type;
	notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
	notify.trigger_id = ctx_isp->trigger_id;

	CAM_DBG(CAM_ISP,
		"Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld",
		__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
		ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
		ctx_isp->req_info.last_bufdone_req_id);

	rc = ctx->ctx_crm_intf->notify_trigger(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id, rc);

	return rc;
}
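
/*
 * Queue a V4L2 error event toward userspace via the request manager
 * message interface, carrying the error type/code and the offending
 * request id.
 */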
static int __cam_isp_ctx_notify_v4l2_error_event(
	uint32_t error_type, uint32_t error_code,
	uint64_t error_request_id, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx->session_hdl;
	req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
	req_msg.u.err_msg.error_type = error_type;
	req_msg.u.err_msg.link_hdl = ctx->link_hdl;
	req_msg.u.err_msg.request_id = error_request_id;
	req_msg.u.err_msg.resource_size = 0x0;
	req_msg.u.err_msg.error_code = error_code;

	CAM_DBG(CAM_ISP,
		"v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x notified successfully",
		error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);

	rc = cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_ERROR,
		V4L_EVENT_CAM_REQ_MGR_EVENT);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x",
			error_type, error_code, error_request_id,
			ctx->ctx_id, ctx->link_hdl);

	return rc;
}
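
/*
 * Report a device error to the CRM over the error-notify interface;
 * bubbles are logged as warnings, everything else as fatal errors.
 */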
static int __cam_isp_ctx_notify_error_util(
	uint32_t trigger_type, enum cam_req_mgr_device_error error,
	uint64_t req_id, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_error_notify notify;

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.req_id = req_id;
	notify.error = error;
	notify.trigger = trigger_type;
	notify.frame_id = ctx_isp->frame_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;

	if (error == CRM_KMD_ERR_BUBBLE)
		CAM_WARN(CAM_ISP,
			"Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x",
			req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
	else
		CAM_ERR(CAM_ISP,
			"Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x",
			error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);

	rc = ctx->ctx_crm_intf->notify_err(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify error: %u for req: %llu on ctx: %u in link: 0x%x",
			error, req_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
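
/*
 * Ask the HW manager to perform a register dump of the given type for
 * this context's acquired hardware.
 */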
static int __cam_isp_ctx_trigger_reg_dump(
	enum cam_hw_mgr_command cmd,
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = cmd;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d",
			ctx->ctx_id, ctx->link_hdl, rc);
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"Reg dump type: %u successful in ctx: %u on link: 0x%x",
		cmd, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
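
/*
 * Ask the CRM to pause the SOF watchdog timer for this link.
 */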
static int __cam_isp_ctx_pause_crm_timer(
	struct cam_context *ctx)
{
	int rc = -EINVAL;
	struct cam_req_mgr_timer_notify timer;

	if (!ctx || !ctx->ctx_crm_intf)
		goto end;

	timer.link_hdl = ctx->link_hdl;
	timer.dev_hdl = ctx->dev_hdl;
	timer.state = false;
	rc = ctx->ctx_crm_intf->notify_timer(&timer);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success",
		ctx->ctx_id, ctx->link_hdl);
end:
	return rc;
}

static inline void __cam_isp_ctx_update_sof_ts_util(
	struct cam_isp_hw_sof_event_data *sof_event_data,
	struct cam_isp_context *ctx_isp)
{
	/* Delayed update, skip if ts is already updated */
	if (ctx_isp->sof_timestamp_val == sof_event_data->timestamp)
		return;

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
}
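
/*
 * Dump each command buffer of a request after validating its offset and
 * length against the mapped buffer size. Depending on dump_to_buff, the
 * contents go either into the caller-provided buffer via the CDM dump
 * helper or straight to the kernel log.
 */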
static int cam_isp_ctx_dump_req(
	struct cam_isp_ctx_req *req_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset,
	bool dump_to_buff)
{
	int i, rc = 0;
	size_t len = 0;
	uint32_t *buf_addr;
	uint32_t *buf_start, *buf_end;
	size_t remain_len = 0;
	struct cam_cdm_cmd_buf_dump_info dump_info;

	for (i = 0; i < req_isp->num_cfg; i++) {
		rc = cam_packet_util_get_cmd_mem_addr(
			req_isp->cfg[i].handle, &buf_addr, &len);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Failed to get_cmd_mem_addr, rc=%d",
				rc);
		} else {
			if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
				CAM_ERR(CAM_ISP,
					"Invalid offset exp %u actual %u",
					req_isp->cfg[i].offset, (uint32_t)len);
				return -EINVAL;
			}
			remain_len = len - req_isp->cfg[i].offset;

			if (req_isp->cfg[i].len >
				((uint32_t)remain_len)) {
				CAM_ERR(CAM_ISP,
					"Invalid len exp %u remain_len %u",
					req_isp->cfg[i].len,
					(uint32_t)remain_len);
				return -EINVAL;
			}

			buf_start = (uint32_t *)((uint8_t *) buf_addr +
				req_isp->cfg[i].offset);
			buf_end = (uint32_t *)((uint8_t *) buf_start +
				req_isp->cfg[i].len - 1);

			if (dump_to_buff) {
				if (!cpu_addr || !offset || !buf_len) {
					CAM_ERR(CAM_ISP, "Invalid args");
					break;
				}
				dump_info.src_start = buf_start;
				dump_info.src_end = buf_end;
				dump_info.dst_start = cpu_addr;
				dump_info.dst_offset = *offset;
				dump_info.dst_max_size = buf_len;
				rc = cam_cdm_util_dump_cmd_bufs_v2(
					&dump_info);
				*offset = dump_info.dst_offset;
				if (rc)
					return rc;
			} else
				cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
		}
	}
	return rc;
}
static int __cam_isp_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;
	struct cam_isp_context *ctx_isp;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_ISP,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_SUBMIT, req);
	spin_unlock_bh(&ctx->lock);
	return 0;
}

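/*
 * Worked example for the in-order insert above (ids are illustrative): with
 * pending_req_list = [4, 7] and an incoming request id 5, the reverse walk
 * moves 7 onto temp_list (7 > 5) and stops at 4 (5 > 4); 5 is then appended
 * after 4 and temp_list is spliced back, giving [4, 5, 7]. An id equal to an
 * existing entry only logs the "duplicated request" warning and is still
 * linked after it.
 */
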
static int __cam_isp_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_isp_ctx_req *req_isp_old;
	struct cam_isp_ctx_req *req_isp_new;
	struct cam_isp_prepare_hw_update_data *req_update_old;
	struct cam_isp_prepare_hw_update_data *req_update_new;
	struct cam_isp_prepare_hw_update_data *hw_update_data;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		CAM_DBG(CAM_ISP, "INIT packet added req id= %lld",
			req->request_id);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp_old->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
			ctx->max_hw_update_entries) {
			CAM_WARN(CAM_ISP,
				"Can not merge INIT pkt num_cfgs = %d",
				(req_isp_old->num_cfg +
					req_isp_new->num_cfg));
			rc = -ENOMEM;
		}

		if (req_isp_old->num_fence_map_out != 0 ||
			req_isp_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_isp_old->fence_map_out,
				req_isp_new->fence_map_out,
				sizeof(req_isp_new->fence_map_out[0]) *
				req_isp_new->num_fence_map_out);
			req_isp_old->num_fence_map_out =
				req_isp_new->num_fence_map_out;

			memcpy(req_isp_old->fence_map_in,
				req_isp_new->fence_map_in,
				sizeof(req_isp_new->fence_map_in[0]) *
				req_isp_new->num_fence_map_in);
			req_isp_old->num_fence_map_in =
				req_isp_new->num_fence_map_in;

			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
				req_isp_new->cfg,
				sizeof(req_isp_new->cfg[0]) *
				req_isp_new->num_cfg);
			req_isp_old->num_cfg += req_isp_new->num_cfg;

			memcpy(&req_old->pf_data, &req->pf_data,
				sizeof(struct cam_hw_mgr_dump_pf_data));

			if (req_isp_new->hw_update_data.num_reg_dump_buf) {
				req_update_new = &req_isp_new->hw_update_data;
				req_update_old = &req_isp_old->hw_update_data;
				memcpy(&req_update_old->reg_dump_buf_desc,
					&req_update_new->reg_dump_buf_desc,
					sizeof(struct cam_cmd_buf_desc) *
					req_update_new->num_reg_dump_buf);
				req_update_old->num_reg_dump_buf =
					req_update_new->num_reg_dump_buf;
			}

			/* Update HW update params for ePCR */
			hw_update_data = &req_isp_new->hw_update_data;
			req_isp_old->hw_update_data.frame_header_res_id =
				req_isp_new->hw_update_data.frame_header_res_id;
			req_isp_old->hw_update_data.frame_header_cpu_addr =
				hw_update_data->frame_header_cpu_addr;
			if (req_isp_new->hw_update_data.mup_en) {
				req_isp_old->hw_update_data.mup_en =
					req_isp_new->hw_update_data.mup_en;
				req_isp_old->hw_update_data.mup_val =
					req_isp_new->hw_update_data.mup_val;
				req_isp_old->hw_update_data.num_exp =
					req_isp_new->hw_update_data.num_exp;
			}
			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_ISP,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}

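/*
 * Merge behaviour sketch (values are illustrative): if the pending list head
 * is an INIT packet with num_cfg = 3 and a second INIT packet arrives with
 * num_cfg = 2, the command buffers are concatenated (num_cfg = 5), the fence
 * maps and reg dump descriptors are copied across, the merged request takes
 * the newer request_id, and the incoming request object goes back to the
 * free list.
 */
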
static const char *__cam_isp_ife_sfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* IFE output ports */
	case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL";
	case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4";
	case CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16";
	case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP";
	case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD";
	case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF";
	case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0";
	case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1";
	case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2";
	case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF";
	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS";
	case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS";
	case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST";
	case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP";
	case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP";
	case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP";
	case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD";
	case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR";
	case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW";
	case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD";
	case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE";
	case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS";
	case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST";
	case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG";
	case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW";
	case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD";
	case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF";
	case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS";
	case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA";
	/* SFE output ports */
	case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0";
	case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1";
	case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2";
	case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3";
	case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4";
	case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0";
	case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1";
	case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2";
	case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0";
	case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1";
	case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2";
	case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR";
	case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW";
	case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2";
	/* SFE input ports */
	case CAM_ISP_SFE_IN_RD_0: return "SFE_RD_0";
	case CAM_ISP_SFE_IN_RD_1: return "SFE_RD_1";
	case CAM_ISP_SFE_IN_RD_2: return "SFE_RD_2";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}

static const char *__cam_isp_tfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* TFE output ports */
	case CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL";
	case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP";
	case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF";
	case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0";
	case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1";
	case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST";
	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF";
	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS";
	case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4";
	case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16";
	case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}

static const char *__cam_isp_resource_handle_id_to_type(
	uint32_t device_type, uint32_t resource_handle)
{
	switch (device_type) {
	case CAM_IFE_DEVICE_TYPE:
		return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle);
	case CAM_TFE_DEVICE_TYPE:
		return __cam_isp_tfe_resource_handle_id_to_type(resource_handle);
	default:
		return "INVALID_DEV_TYPE";
	}
}

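/*
 * Usage sketch (hypothetical handle value): a buf done log for an IFE
 * full-res output resolves as
 *
 *     __cam_isp_resource_handle_id_to_type(CAM_IFE_DEVICE_TYPE,
 *             CAM_ISP_IFE_OUT_RES_FULL)  ->  "IFE_FULL"
 *
 * Unknown handles fall through to "Invalid_Resource_Type" rather than NULL,
 * so the result is always safe to feed to a %s format specifier.
 */
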
static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
{
	uint64_t ts = 0;

	if (!evt_data)
		return 0;

	switch (evt_id) {
	case CAM_ISP_HW_EVENT_ERROR:
		ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_SOF:
		ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_REG_UPDATE:
		ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EPOCH:
		ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EOF:
		ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_DONE:
	case CAM_ISP_HW_SECONDARY_EVENT:
		break;
	default:
		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
	}

	return ts;
}

static int __cam_isp_ctx_get_hw_timestamp(struct cam_context *ctx, uint64_t *prev_ts,
	uint64_t *curr_ts, uint64_t *boot_ts)
{
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	int rc;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	hw_cmd_args.u.internal_args = &isp_hw_cmd_args;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_SOF_TS;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
	if (rc)
		return rc;

	if (isp_hw_cmd_args.u.sof_ts.prev >= isp_hw_cmd_args.u.sof_ts.curr) {
		CAM_ERR(CAM_ISP,
			"ctx:%u previous timestamp is greater than or equal to current timestamp",
			ctx->ctx_id);
		return -EINVAL;
	}

	*prev_ts = isp_hw_cmd_args.u.sof_ts.prev;
	*curr_ts = isp_hw_cmd_args.u.sof_ts.curr;
	*boot_ts = isp_hw_cmd_args.u.sof_ts.boot;
	return 0;
}

static int __cam_isp_ctx_recover_sof_timestamp(struct cam_context *ctx)
{
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	uint64_t prev_ts, curr_ts, boot_ts;
	uint64_t a, b, c;
	int rc;

	if (ctx_isp->frame_id < 1) {
		CAM_ERR(CAM_ISP, "ctx:%u Timestamp recovery is not possible for the first frame",
			ctx->ctx_id);
		return -EPERM;
	}

	rc = __cam_isp_ctx_get_hw_timestamp(ctx, &prev_ts, &curr_ts, &boot_ts);
	if (rc) {
		CAM_ERR(CAM_ISP, "ctx:%u Failed to get timestamp from HW", ctx->ctx_id);
		return rc;
	}

	/*
	 * If the last received SOF was for frame A and we have missed the SOF for frame B,
	 * then we need to find out if the hardware is at frame B or C.
	 *   +-----+-----+-----+
	 *   |  A  |  B  |  C  |
	 *   +-----+-----+-----+
	 */
	a = ctx_isp->sof_timestamp_val;
	if (a == prev_ts) {
		/* Hardware is at frame B */
		b = curr_ts;
		CAM_DBG(CAM_ISP, "ctx:%u recovered timestamp (last:0x%llx, curr:0x%llx)",
			ctx->ctx_id, a, b);
	} else if (a < prev_ts) {
		/* Hardware is at frame C */
		b = prev_ts;
		c = curr_ts;
		CAM_DBG(CAM_ISP,
			"ctx:%u recovered timestamp (last:0x%llx, prev:0x%llx, curr:0x%llx)",
			ctx->ctx_id, a, b, c);
	} else {
		/* Hardware is at frame A (which we supposedly missed) */
		CAM_ERR(CAM_ISP,
			"ctx:%u erroneous call to SOF recovery (last:0x%llx, prev:0x%llx, curr:0x%llx)",
			ctx->ctx_id, a, prev_ts, curr_ts);
		return 0;
	}

	ctx_isp->boot_timestamp += (b - a);
	ctx_isp->sof_timestamp_val = b;
	ctx_isp->frame_id++;
	return 0;
}

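/*
 * Worked example for the recovery above (values are illustrative): with the
 * last serviced SOF at a = 0x1000 and HW reporting prev_ts = 0x1000,
 * curr_ts = 0x1400, the HW is at frame B: frame_id advances by one and
 * boot_timestamp shifts by b - a = 0x400. If instead prev_ts = 0x1400 and
 * curr_ts = 0x1800, the HW is already at frame C; the same single-step
 * update is applied with b = prev_ts, and the remaining missed SOF is
 * recovered on a subsequent call.
 */
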
static void __cam_isp_ctx_send_sof_boot_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->boot_timestamp, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
		V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the boot time for req id:%lld",
			request_id);
}

static void __cam_isp_ctx_send_unified_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg_v2.request_id = request_id;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] =
		(request_id == 0) ? 0 : ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d boot time stamp:0x%llx",
		ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, ctx_isp->base->ctx_id,
		ctx_isp->boot_timestamp);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof and boot time for req id:%lld",
			request_id);
}

static void __cam_isp_ctx_send_sof_timestamp_frame_header(
	struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
	uint64_t request_id, uint32_t sof_event_status)
{
	uint32_t *time32 = NULL;
	uint64_t timestamp = 0;
	struct cam_req_mgr_message req_msg;

	time32 = frame_header_cpu_addr;
	timestamp = (uint64_t) time32[1];
	timestamp = timestamp << 24;
	timestamp |= (uint64_t)(time32[0] >> 8);
	timestamp = mul_u64_u32_div(timestamp,
		CAM_IFE_QTIMER_MUL_FACTOR,
		CAM_IFE_QTIMER_DIV_FACTOR);

	ctx_isp->sof_timestamp_val = timestamp;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);
}

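/*
 * Timestamp assembly sketch for the function above: the frame header carries
 * the qtimer value in two 32-bit words, and the code forms a 56-bit tick
 * count as (time32[1] << 24) | (time32[0] >> 8), i.e. the low 8 bits of
 * word 0 are dropped. mul_u64_u32_div() then scales ticks to nanoseconds.
 * Assuming the usual 19.2 MHz qtimer with MUL/DIV factors of 10000/192
 * (check the header for the exact values), 192 ticks would convert as
 * 192 * 10000 / 192 = 10000 ns.
 */
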
static void __cam_isp_ctx_send_sof_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	if (ctx_isp->reported_frame_id == ctx_isp->frame_id) {
		if (__cam_isp_ctx_recover_sof_timestamp(ctx_isp->base))
			CAM_WARN(CAM_ISP, "Missed SOF. Unable to recover SOF timestamp.");
	}
	ctx_isp->reported_frame_id = ctx_isp->frame_id;

	if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS))
		&& !ctx_isp->use_frame_header_ts) {
		__cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id);
		return;
	}

	if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
		goto end;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);

end:
	__cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
		request_id, sof_event_status);
}

static void __cam_isp_ctx_handle_buf_done_fail_log(
	uint64_t request_id, struct cam_isp_ctx_req *req_isp,
	uint32_t isp_device_type)
{
	int i;
	const char *handle_type;

	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP,
			"Num Resources exceed MAX %d >= %d",
			req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
		return;
	}

	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
		request_id, req_isp->num_fence_map_out, req_isp->num_acked,
		req_isp->bubble_report, req_isp->bubble_detected);
	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Resource Handles that fail to generate buf_done in prev frame");
	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		if (req_isp->fence_map_out[i].sync_id != -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				isp_device_type, req_isp->fence_map_out[i].resource_handle);
			trace_cam_log_event("Buf_done Congestion",
				handle_type, request_id, req_isp->fence_map_out[i].sync_id);
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
				handle_type,
				req_isp->fence_map_out[i].resource_handle,
				req_isp->fence_map_out[i].sync_id);
		}
	}
}

static void __cam_isp_context_reset_internal_recovery_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->process_bubble, 0);
	ctx_isp->recovery_req_id = 0;
	ctx_isp->aeb_error_cnt = 0;
}

static int __cam_isp_context_try_internal_recovery(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	/*
	 * Start with the wait list; if recovery is still set, the errored
	 * request has not been moved to the pending list yet and its buf
	 * done has not occurred - recover from here.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/*
	 * If not in the wait list, the only other possibility is that the
	 * request is in the pending list. On error detection, bubble detect
	 * is set; assuming a new frame arrives after detection, there is a
	 * RUP, the request moves to the active list and finishes with its
	 * buf dones.
	 */
	if (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/* If the request is not found in either list, skip recovery */
	__cam_isp_context_reset_internal_recovery_params(ctx_isp);
end:
	return rc;
}

static int __cam_isp_ctx_handle_buf_done_for_req_list(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0, i;
	uint64_t buf_done_req_id;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	ctx_isp->active_req_cnt--;
	buf_done_req_id = req->request_id;

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
		req_isp->cdm_reset_before_apply = false;
		ctx_isp->bubble_frame_cnt = 0;

		if (buf_done_req_id <= ctx->last_flush_req) {
			for (i = 0; i < req_isp->num_fence_map_out; i++)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);

			list_add_tail(&req->list, &ctx->free_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
				buf_done_req_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
			ctx_isp->last_bufdone_err_apply_req_id = 0;
		} else {
			list_add(&req->list, &ctx->pending_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
				req->request_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
		}
	} else {
		if (!ctx_isp->use_frame_header_ts) {
			if (ctx_isp->reported_req_id < buf_done_req_id) {
				ctx_isp->reported_req_id = buf_done_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					buf_done_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		/*
		 * Only update the process_bubble and bubble_frame_cnt
		 * when bubble is detected on this req, in case the other
		 * request is processing bubble.
		 */
		if (req_isp->bubble_detected) {
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			req_isp->bubble_detected = false;
		}

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
		ctx_isp->last_bufdone_err_apply_req_id = 0;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
		__cam_isp_context_try_internal_recovery(ctx_isp);

	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_BUFDONE, req);
	return rc;
}

static int __cam_isp_ctx_handle_buf_done_for_request(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	struct cam_isp_hw_done_event_data *done_next_req)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	done_next_req->num_handles = 0;
	done_next_req->timestamp = done->timestamp;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in current request, it could be
			 * belonging to next request, this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_WARN(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					done->resource_handle[i]),
				req->request_id);
			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type =
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					req_isp->fence_map_out[j].resource_handle);
			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);
			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);
			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%d ctx_id:%d bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * Increment the ack number here, and queue the
			 * request back to pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}

static int __cam_isp_handle_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	bool bubble_handling,
	uint32_t status, uint32_t event_cause)
{
	int i, j;
	int rc = 0;
	struct cam_isp_ctx_req *req_isp =
		(struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, bubble_handling);

	for (i = 0; i < req_isp->num_deferred_acks; i++) {
		j = req_isp->deferred_fence_map_index[i];

		CAM_DBG(CAM_ISP,
			"ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
			ctx->ctx_id, status, event_cause,
			req->request_id,
			req_isp->fence_map_out[j].resource_handle,
			req_isp->fence_map_out[j].sync_id);

		if (req_isp->fence_map_out[j].sync_id == -1) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
				ctx->ctx_id, req->request_id, j,
				req_isp->fence_map_out[j].resource_handle);
			continue;
		}

		if (!bubble_handling) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
				ctx->ctx_id, req->request_id, status,
				req_isp->fence_map_out[j].resource_handle);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				status, event_cause);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
					ctx->ctx_id, req->request_id,
					req_isp->fence_map_out[j].sync_id,
					status, rc);
			} else {
				req_isp->num_acked++;
				req_isp->fence_map_out[j].sync_id = -1;
			}
		} else {
			req_isp->num_acked++;
		}
	}

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handled %d deferred buf_dones num_acked=%d, num_fence_map_out=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, req_isp->num_fence_map_out);

	req_isp->num_deferred_acks = 0;

	return rc;
}

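/*
 * Deferred-ack flow sketch (the resource is illustrative): a request still on
 * the wait list receives a buf done for, say, RDI_0; the fence index is
 * parked in deferred_fence_map_index[] instead of being signalled. Once the
 * request's fate is known, the helper above replays the parked acks: with
 * bubble_handling = true it only bumps num_acked (the request will be
 * re-applied), while with bubble_handling = false it signals each parked
 * fence with the given status/event_cause and resets its sync_id.
 */
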
static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	if (req_isp->num_deferred_acks)
		rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req,
			req_isp->bubble_report,
			CAM_SYNC_STATE_SIGNALED_ERROR,
			CAM_SYNC_ISP_EVENT_BUBBLE);

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked == req_isp->num_fence_map_out)
		rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);

	return rc;
}

static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	bool verify_consumed_addr,
	bool defer_buf_done)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;
	uint32_t cmp_addr = 0;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	if (done->num_handles > CAM_NUM_OUT_PER_COMP_IRQ_MAX) {
		CAM_ERR(CAM_ISP, "ctx: %u req: %llu num_handles: %u is more than %u",
			ctx->ctx_id, req->request_id,
			done->num_handles, CAM_NUM_OUT_PER_COMP_IRQ_MAX);
		return -EINVAL;
	}

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
				req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if (verify_consumed_addr && (done->last_consumed_addr[i] != cmp_addr))
				continue;

			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in current request, it could be
			 * belonging to next request, this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_DBG(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type, done->resource_handle[i]),
				req->request_id);
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				ctx_isp->isp_device_type,
				req_isp->fence_map_out[j].resource_handle);
			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);
			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%d ctx_id:%d bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (defer_buf_done) {
			uint32_t deferred_indx = req_isp->num_deferred_acks;

			/*
			 * If we are handling this BUF_DONE event for a request
			 * that is still in wait_list, do not signal now,
			 * instead mark it as done and handle it later -
			 * if this request is going into BUBBLE state later
			 * it will automatically be re-applied. If this is not
			 * going into BUBBLE, signal fences later.
			 * Note - we will come here only if the last consumed
			 * address matches with this port's buffer.
			 */
			req_isp->deferred_fence_map_index[deferred_indx] = j;
			req_isp->num_deferred_acks++;
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
				ctx->ctx_id, req->request_id, bubble_state,
				req_isp->bubble_report);
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
				ctx->ctx_id, req_isp->num_deferred_acks, j,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);
			continue;
		} else if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					false,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * Increment the ack number here, and queue the
			 * request back to pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					true,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc)
			req_isp->num_acked++;

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}

static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_done_event_data done_next_req;

	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
		bubble_state, &done_next_req);

	if (done_next_req.num_handles) {
		struct cam_isp_hw_done_event_data unhandled_res;
		struct cam_ctx_request *next_req = list_last_entry(
			&ctx->active_req_list, struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id) {
			/*
			 * A few resource handles are already signalled in the
			 * current request; let's check if there is another
			 * request waiting for these resources. This can
			 * happen if some of the next request's buf done events
			 * are handled before the current request's remaining
			 * buf dones due to IRQ scheduling. Check only one more
			 * request, as we will have a maximum of 2 requests in
			 * the active_list at any time.
			 */
			CAM_WARN(CAM_ISP,
				"Unhandled buf done resources for req %lld, trying next request %lld in active_list",
				req->request_id, next_req->request_id);

			__cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
				next_req, &done_next_req,
				bubble_state, &unhandled_res);

			if (unhandled_res.num_handles == 0)
				CAM_INFO(CAM_ISP,
					"BUF Done event handled for next request %lld",
					next_req->request_id);
			else
				CAM_ERR(CAM_ISP,
					"BUF Done not handled for next request %lld",
					next_req->request_id);
		} else {
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
		}
	}

	return rc;
}

static void __cam_isp_ctx_buf_done_match_req(
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int i, j;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;
	uint32_t cmp_addr = 0;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
				req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if ((done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle) &&
				(done->last_consumed_addr[i] == cmp_addr)) {
				match_count++;
				break;
			}
		}
	}

	*irq_delay_detected = (match_count > 0);

	CAM_DBG(CAM_ISP,
		"buf done num handles %d match count %d for next req:%lld",
		done->num_handles, match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}

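/*
 * Matching example (addresses are illustrative): if the done event carries
 * {RDI_0, last_consumed_addr = 0xABCD0000} and the next request's fence map
 * has RDI_0 with image_buf_addr[0] = 0xABCD0000, match_count becomes 1 and
 * irq_delay_detected is set - the buf done actually belongs to the newer
 * request, so the caller below skips consumed-address verification on the
 * older one.
 */
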
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	bool req_in_pending_wait_list = false;

	if (list_empty(&ctx->active_req_list)) {
		if (!list_empty(&ctx->wait_req_list)) {
			req = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);

			req_in_pending_wait_list = true;
			if (ctx_isp->last_applied_req_id !=
				ctx_isp->last_bufdone_err_apply_req_id) {
				CAM_WARN(CAM_ISP,
					"Buf done with no active request but with req in wait list, req %llu last apply id:%lld last err id:%lld",
					req->request_id,
					ctx_isp->last_applied_req_id,
					ctx_isp->last_bufdone_err_apply_req_id);
				ctx_isp->last_bufdone_err_apply_req_id =
					ctx_isp->last_applied_req_id;
			}

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * Verify consumed address for this request to make
			 * sure we are handling the buf_done for the correct
			 * buffer. Also defer actual buf_done handling, i.e.
			 * do not signal the fence, as this request may go
			 * into Bubble state eventually.
			 */
			rc =
			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		} else if (!list_empty(&ctx->pending_req_list)) {
			/*
			 * We have seen cases where the hw config is blocked
			 * for some reason and the reg upd and buf done arrive
			 * before the req is added to the wait req list.
			 */
			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);

			req_in_pending_wait_list = true;
			if (ctx_isp->last_applied_req_id !=
				ctx_isp->last_bufdone_err_apply_req_id) {
				CAM_WARN(CAM_ISP,
					"Buf done with no active request but with req in pending list, req %llu last apply id:%lld last err id:%lld",
					req->request_id,
					ctx_isp->last_applied_req_id,
					ctx_isp->last_bufdone_err_apply_req_id);
				ctx_isp->last_bufdone_err_apply_req_id =
					ctx_isp->last_applied_req_id;
			}

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * Verify consumed address for this request to make
			 * sure we are handling the buf_done for the correct
			 * buffer. Also defer actual buf_done handling, i.e.
			 * do not signal the fence, as this request may go
			 * into Bubble state eventually.
			 */
			rc =
			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		}

		if (!req_in_pending_wait_list && (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id)) {
			CAM_WARN(CAM_ISP,
				"Buf done with no active request bubble_state=%d last_applied_req_id:%lld",
				bubble_state, ctx_isp->last_applied_req_id);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If irq delay isn't detected, then we need to verify
	 * the consumed address for current req, otherwise, we
	 * can't verify the consumed address.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
	 * since the reported buf done event may belong to current
	 * req, then we can't signal this event for next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}

static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}

static int __cam_isp_ctx_apply_req_offline(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if ((ctx->state != CAM_CTX_ACTIVATED) ||
		(!atomic_read(&ctx_isp->rxd_epoch)) ||
		(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
		goto end;

	if (ctx_isp->active_req_cnt >= 2)
		goto end;

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	memset(&cfg, 0, sizeof(cfg));
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than the
	 * CDM processing returns, so set the substate before applying
	 * the setting.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}

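/*
 * Offline apply flow in brief: the EPOCH handler further below sets
 * rxd_epoch and schedules this worker through
 * __cam_isp_ctx_schedule_apply_req_offline(); the worker picks the pending
 * head, marks the substate APPLIED before calling hw_config (SOF/REG_UPD may
 * beat the CDM callback), and on failure rolls the request back to the
 * pending list and restores the previous applied req id.
 */
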
static int __cam_isp_ctx_schedule_apply_req_offline(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_req_offline;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}


static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	/*
	 * For offline, an epoch should never be generated before RUP is
	 * done; IRQ scheduling delays can nevertheless make it appear so.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Active list empty on ctx: %u - EPOCH serviced before RUP",
			ctx->ctx_id);
	} else {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}
	}

	__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);

	/*
	 * If there is no valid request, wait for the RUP shutter posted
	 * after buf done.
	 */
	if (request_id)
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}
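
/*
 * Illustrative sketch (not part of the driver): the rxd_epoch handshake
 * between the offline EPOCH handler above and the apply worker. The IRQ
 * side sets the flag and schedules the worker; the worker only proceeds
 * while the flag is set and clears it before committing a request, so at
 * most one apply is issued per received epoch. Condensed, hypothetical
 * flow; kept under #if 0.
 */
#if 0
/* IRQ (tasklet) side */
atomic_set(&ctx_isp->rxd_epoch, 1);
__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);

/* worker side */
if (!atomic_read(&ctx_isp->rxd_epoch))
	return 0;				/* no epoch since last apply */
atomic_set(&ctx_isp->rxd_epoch, 0);		/* consume, then hw_config() */
#endif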

static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated),
			ctx_isp->frame_id);

	return 0;
}

static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* No IO config, so the request is already complete. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);
end:
	return rc;
}
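
/*
 * Illustrative sketch (not part of the driver): the RUP bookkeeping rule
 * above in isolation. A request with output fences becomes "active"
 * (hardware is now writing its buffers), while a settings-only request is
 * complete the moment its registers latch. The helper is hypothetical;
 * kept under #if 0.
 */
#if 0
static void example_on_reg_update(struct cam_context *ctx,
	struct cam_isp_context *ctx_isp, struct cam_ctx_request *req)
{
	struct cam_isp_ctx_req *req_isp = req->req_priv;

	if (req_isp->num_fence_map_out) {
		/* buffers outstanding: park on active list until buf done */
		list_move_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
	} else {
		/* no IO configured: nothing more will arrive for this req */
		list_move_tail(&req->list, &ctx->free_req_list);
	}
}
#endif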

static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the SOF signal. Note that, due to scheduling
	 * delays, two requests may already be on the active queue by the
	 * time we try to do the notification. In that case skip the current
	 * notification so the state machine can catch up.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		if (request_id != 0)
			ctx_isp->reported_req_id = request_id;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
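
/*
 * Illustrative decision sketch (not part of the driver) for the bubble
 * check above, assuming last_cdm_done_req was fetched via
 * CAM_ISP_HW_MGR_GET_LAST_CDM_DONE:
 *
 *   bubble req at head, bubble_frame_cnt >= 1:
 *     last_cdm_done >= req_id -> CDM ran; buf done is merely late, wait
 *     last_cdm_done <  req_id -> CDM stuck; reset acks and re-apply
 *   bubble req, first frame   -> bump bubble_frame_cnt, keep waiting
 *   no bubble on head request -> plain delayed buf done, just log
 *
 * The branch below is a condensed restatement, kept under #if 0.
 */
#if 0
if (last_cdm_done_req >= req->request_id) {
	ctx_isp->bubble_frame_cnt = 0;			/* buf done is late */
} else {
	req_isp->cdm_reset_before_apply = true;		/* CDM stuck */
	list_move(&req->list, &ctx->pending_req_list);	/* re-apply it */
	atomic_set(&ctx_isp->process_bubble, 0);
}
#endif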

static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;

	/* notify reqmgr with eof signal */
	rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}

static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	/* First check if there is a valid request in the active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing is in the active list, the current request might not
	 * have moved from the wait list to the active list yet. This can
	 * happen when REG_UPDATE reaches sw immediately after SOF.
	 */
	if ((request_id == 0) && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);

	return rc;
}
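
/*
 * Illustrative note (not part of the driver): list_first_entry() on an
 * empty list yields a bogus pointer computed from the list head, so a
 * later NULL check cannot protect the caller; that is why the wait-list
 * peek above is guarded with list_empty(). A sketch of the equivalent
 * pattern using list_first_entry_or_null() from <linux/list.h>; the
 * helper name is hypothetical and the block is kept under #if 0.
 */
#if 0
static uint64_t example_peek_request_id(struct list_head *req_list)
{
	struct cam_ctx_request *req;

	/* NULL instead of a garbage pointer when the list is empty */
	req = list_first_entry_or_null(req_list, struct cam_ctx_request, list);

	return req ? req->request_id : 0;
}
#endif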

static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This handles the first reg update. The initial setting itself
	 * causes the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"received rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}

static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no wait request at epoch, this is an error
		 * case. The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as an empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;
	atomic_set(&ctx_isp->process_bubble, 1);

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	/*
	 * Always move the request to the active list; let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to the active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	/*
	 * Update the event record before the req pointer moves on
	 * to another (possibly invalid) request.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Get the req again from the active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if ((request_id == 0) && !list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
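
/*
 * Illustrative timeline (not part of the driver) for the bubble detection
 * above. RUP normally moves the applied request off the wait list before
 * EPOCH fires; seeing EPOCH while the request is still waiting means the
 * frame latched without the new settings:
 *
 *   APPLIED -> SOF -> RUP -> EPOCH       : normal, substate -> EPOCH
 *   APPLIED -> SOF -> EPOCH (no RUP yet) : bubble, substate -> BUBBLE
 *
 * A condensed restatement of what the handler commits on a bubble, kept
 * under #if 0.
 */
#if 0
req_isp->bubble_detected = true;
req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;	/* re-apply IO buffers only */
atomic_set(&ctx_isp->process_bubble, 1);	/* blocks further applies */
list_move_tail(&req->list, &ctx->active_req_list);	/* buf done retires it */
#endif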

static int __cam_isp_ctx_buf_done_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	if (list_empty(&ctx->active_req_list)) {
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	} else {
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}

static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * Reaching here means we missed the reg upd ack, so we need to
	 * transition to the BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req at epoch, this is an error
		 * case. Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list; let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}

static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

static uint32_t get_evt_param(uint32_t error_type)
{
	switch (error_type) {
	case CAM_ISP_HW_ERROR_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_OVERFLOW;
	case CAM_ISP_HW_ERROR_P2I_ERROR:
		return CAM_SYNC_ISP_EVENT_P2I_ERROR;
	case CAM_ISP_HW_ERROR_VIOLATION:
		return CAM_SYNC_ISP_EVENT_VIOLATION;
	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
	default:
		return CAM_SYNC_ISP_EVENT_UNKNOWN;
	}
}
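
/*
 * Illustrative usage (not part of the driver): the value returned by
 * get_evt_param() is passed as the event cause when fences are failed, so
 * userspace can tell an overflow from a violation. Condensed from the
 * error handler below; kept under #if 0.
 */
#if 0
uint32_t evt_param = get_evt_param(error_type);

cam_sync_signal(fence_map_out->sync_id,
	CAM_SYNC_STATE_SIGNALED_ERROR, evt_param);
#endif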

static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	enum cam_req_mgr_device_error error;
	uint32_t i = 0;
	bool found = false;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_to_report = NULL;
	struct cam_ctx_request *req_to_dump = NULL;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_ctx_req *req_isp_to_report = NULL;
	uint64_t error_request_id;
	struct cam_hw_fence_map_entry *fence_map_out = NULL;
	uint32_t evt_param;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;
	uint32_t error_type = error_event_data->error_type;

	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);

	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) ||
		(error_type == CAM_ISP_HW_ERROR_VIOLATION))
		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	evt_param = get_evt_param(error_type);

	/*
	 * The error is most likely caused by the first request on the
	 * active list. If the active list is empty, check the wait list
	 * (the error may have hit right at RUP, in which case it is
	 * handled before the RUP itself).
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"handling error with no active request");
		if (list_empty(&ctx->wait_req_list)) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Error with no active/wait request");
			goto end;
		} else {
			req_to_dump = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
		}
	} else {
		req_to_dump = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	}

	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;

	if (error_event_data->enable_req_dump)
		rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						evt_param);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			ctx_isp->active_req_cnt--;
		} else {
			found = true;
			break;
		}
	}

	if (found)
		goto move_to_pending;

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						evt_param);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			found = true;
			break;
		}
	}

move_to_pending:
	/*
	 * If bubble recovery is enabled on any request, that request and
	 * all subsequent requests must move to the pending list.
	 * Note:
	 * Traverse the active list in reverse order and add each entry to
	 * the head of the pending list.
	 * e.g. pending current state: 10, 11 | active current state: 8, 9
	 * intermediate for-loop iteration - pending: 9, 10, 11 | active: 8
	 * final state - pending: 8, 9, 10, 11 | active: NULL
	 */
	if (found) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->active_req_cnt--;
		}
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->wait_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
		}
	}

end:
	do {
		if (list_empty(&ctx->pending_req_list)) {
			error_request_id = ctx_isp->last_applied_req_id;
			req_isp = NULL;
			break;
		}
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		error_request_id = ctx_isp->last_applied_req_id;

		if (req_isp->bubble_report) {
			req_to_report = req;
			req_isp_to_report = req_to_report->req_priv;
			break;
		}

		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					evt_param);
			req_isp->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	error = CRM_KMD_ERR_FATAL;
	if (req_isp_to_report && req_isp_to_report->bubble_report)
		if (error_event_data->recovery_enabled)
			error = CRM_KMD_ERR_BUBBLE;

	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error,
		error_request_id, ctx_isp);

	/*
	 * An error that occurred in the KMD needs to be reported; this
	 * helps the UMD to take the necessary action and to dump relevant
	 * info.
	 */
	if (error == CRM_KMD_ERR_FATAL) {
		uint32_t req_mgr_error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;

		if (error_type == CAM_ISP_HW_ERROR_CSID_FATAL)
			req_mgr_error_type =
				CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;

		__cam_isp_ctx_notify_v4l2_error_event(req_mgr_error_type,
			error_event_data->error_code, error_request_id, ctx);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
	CAM_DBG(CAM_ISP, "Handling error done on ctx: %u", ctx->ctx_id);
exit:
	return rc;
}
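
/*
 * Illustrative flow summary (not part of the driver) of the error handler
 * above:
 *
 *   1. pick the likely culprit (head of active list, else head of wait
 *      list), optionally dump it and the registers
 *   2. fail fences for every request that did not ask for bubble recovery
 *   3. if any request wants recovery, rewind active + wait back onto the
 *      pending list in order (reverse traversal, head insertion)
 *   4. notify CRM with CRM_KMD_ERR_BUBBLE (recoverable) or
 *      CRM_KMD_ERR_FATAL, plus a v4l2 error event for fatal cases
 *   5. substate -> HW_ERROR until a reg update moves it back to SOF
 *
 * Step 3 condensed (head insertion while walking backwards preserves the
 * request order); kept under #if 0.
 */
#if 0
list_for_each_entry_safe_reverse(req, req_temp, &ctx->active_req_list, list)
	list_move(&req->list, &ctx->pending_req_list);
#endif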

static int __cam_isp_ctx_fs2_sof_in_sof_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (!(list_empty(&ctx->wait_req_list)))
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);
end:
	return rc;
}

static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;
	struct cam_context *ctx = ctx_isp->base;
	int prev_active_req_cnt = 0;
	uint64_t curr_req_id = 0;
	struct cam_ctx_request *req;

	prev_active_req_cnt = ctx_isp->active_req_cnt;
	if (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		curr_req_id = req->request_id;
	}

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);

	if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
		if (list_empty(&ctx->wait_req_list) &&
			list_empty(&ctx->active_req_list)) {
			CAM_DBG(CAM_ISP, "No request, move to SOF");
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_SOF;
			if (ctx_isp->reported_req_id < curr_req_id) {
				ctx_isp->reported_req_id = curr_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					curr_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
	}

	return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_applied(
	struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}

static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This handles the first reg update. The initial setting itself
	 * causes the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"received rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}

static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* No IO config, so the request is already complete. */
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	if (req_isp->num_fence_map_out != 1)
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated));
end:
	if (req != NULL && !rc) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
	return rc;
}

static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
	struct cam_isp_context *ctx_isp)
{
	struct cam_context *ctx = ctx_isp->base;

	if ((++ctx_isp->aeb_error_cnt) <= CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX) {
		CAM_WARN(CAM_ISP,
			"AEB slave RDI's current request's SOF seen after next req is applied for ctx: %u on link: 0x%x last_applied_req: %llu err_cnt: %u",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, ctx_isp->aeb_error_cnt);
		return;
	}

	CAM_ERR(CAM_ISP,
		"Fatal - AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height needs to be re-configured for ctx: %u on link: 0x%x err_cnt: %u",
		ctx->ctx_id, ctx->link_hdl, ctx_isp->aeb_error_cnt);

	/* Pause CRM timer */
	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	/* Trigger reg dump */
	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	/* Notify CRM on fatal error */
	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
		ctx_isp->last_applied_req_id, ctx_isp);

	/* Notify userland on error */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
		CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);

	/* Change state to HALT, stop further processing of HW events */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
}

static int __cam_isp_ctx_trigger_internal_recovery(
	bool sync_frame_drop, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	bool do_recovery = true;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If the wait list is empty and we encounter a "silent"
		 * frame drop, the settings applied on the previous frame
		 * did not take effect at this frame boundary and are
		 * expected to latch a frame later - no need to recover.
		 * If it is an out-of-sync drop, use the pending request.
		 */
		if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);
		else
			do_recovery = false;
	}

	/* If both wait and pending lists have no request to recover on */
	if (!do_recovery) {
		CAM_WARN(CAM_ISP,
			"No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
			ctx_isp->req_info.last_bufdone_req_id);
		goto end;
	}

	if (!req) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		if (req->request_id != ctx_isp->last_applied_req_id)
			CAM_WARN(CAM_ISP,
				"Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx->ctx_id, ctx->link_hdl);
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	/*
	 * Treat this as a bubble; after recovery, re-start from the
	 * appropriate sub-state. This blocks servicing any further apply
	 * calls from CRM.
	 */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	atomic_set(&ctx_isp->process_bubble, 1);
	ctx_isp->recovery_req_id = req->request_id;

	/* Wait for active requests to finish before issuing recovery */
	if (ctx_isp->active_req_cnt) {
		req_isp->bubble_detected = true;
		CAM_WARN(CAM_ISP,
			"Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
			ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
			ctx->ctx_id, ctx->link_hdl);
	} else {
		rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			ctx_isp->recovery_req_id, ctx_isp);
		if (rc) {
			/* Unable to do bubble recovery; reset back to normal */
			CAM_WARN(CAM_ISP,
				"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			__cam_isp_context_reset_internal_recovery_params(ctx_isp);
			goto end;
		}

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
	}

end:
	return rc;
}
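
/*
 * Illustrative decision sketch (not part of the driver) for the recovery
 * trigger above:
 *
 *   wait list non-empty              -> recover on first waiting request
 *   wait empty, sync drop, pending   -> recover on first pending request
 *   wait empty otherwise             -> silent drop latches a frame later,
 *                                       skip recovery
 *
 * With active requests still in flight only the flags are set and the
 * actual recovery is deferred to the final buf done; otherwise the bubble
 * is reported to CRM immediately and the request goes back to pending.
 */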

static int __cam_isp_ctx_handle_secondary_events(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	bool recover = false, sync_frame_drop = false;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_secondary_event_data *sec_evt_data =
		(struct cam_isp_hw_secondary_event_data *)evt_data;

	/* Current scheme handles only custom AEB */
	if (!ctx_isp->aeb_enabled) {
		CAM_WARN(CAM_ISP,
			"Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	/*
	 * In case of custom AEB, ensure the first exposure frame has not
	 * moved forward with its settings without the second/third
	 * exposure frames coming in. Also track for bubbles: with system
	 * delays it is possible for the IFE settings not to be written to
	 * HW on a given frame. If either scenario occurs, flag it as an
	 * error and recover.
	 */
	switch (sec_evt_data->evt_type) {
	case CAM_ISP_HW_SEC_EVENT_SOF:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
			ctx_isp->last_applied_req_id);

		/* Slave RDI's frame starting post IFE EPOCH - Fatal */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
			__cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
		else
			/* Reset error count */
			ctx_isp->aeb_error_cnt = 0;
		break;
	case CAM_ISP_HW_SEC_EVENT_EPOCH:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
			ctx_isp->last_applied_req_id);

		/*
		 * Master RDI frame dropped in CSID; due to a programming
		 * delay there was no RUP/AUP. On such occasions use the
		 * CSID CAMIF EPOCH for bubble detection, flag on detection
		 * and perform the necessary bubble recovery.
		 */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
			recover = true;
			CAM_WARN(CAM_ISP,
				"Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
				ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		}
		break;
	case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
			ctx_isp->last_applied_req_id);

		/* Avoid a recovery loop if the frame is dropped at stream on */
		if (!ctx_isp->frame_id) {
			CAM_ERR(CAM_ISP,
				"Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
				ctx->ctx_id, ctx->link_hdl,
				ctx_isp->frame_id, ctx_isp->last_applied_req_id);
			rc = -EPERM;
			break;
		}

		recover = true;
		sync_frame_drop = true;
		CAM_WARN(CAM_ISP,
			"Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		break;
	default:
		break;
	}

	if (recover && ctx_isp->do_internal_recovery)
		rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);

end:
	return rc;
}
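
/*
 * Illustrative mapping (not part of the driver) of the AEB secondary
 * events handled above:
 *
 *   SEC_EVENT_SOF   in APPLIED/BUBBLE_APPLIED -> slave RDI started after
 *                   the next apply; warn up to
 *                   CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX, then halt
 *   SEC_EVENT_EPOCH in APPLIED/BUBBLE_APPLIED -> master frame dropped in
 *                   CSID (no RUP/AUP); internal bubble recovery
 *   OUT_OF_SYNC_FRAME_DROP                    -> vc mismatch; recovery
 *                   unless it happens at stream on (frame_id == 0)
 */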

static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_sof,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
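
/*
 * Illustrative sketch (not part of the driver): how a table like the one
 * above is consumed. The dispatcher indexes first by the current activated
 * substate and then by the hw event id; a NULL slot means the event is
 * ignored in that substate (e.g. most events while in HALT). The helper
 * and the handler signature are assumptions for illustration; kept under
 * #if 0.
 */
#if 0
static int example_dispatch(struct cam_isp_context *ctx_isp,
	uint32_t evt_id, void *evt_data)
{
	int (*handler)(struct cam_isp_context *, void *);

	handler = cam_isp_ctx_activated_state_machine_irq
			[ctx_isp->substate_activated].irq_ops[evt_id];
	if (!handler)
		return 0;	/* event not serviced in this substate */

	return handler(ctx_isp, evt_data);
}
#endif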

static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_fs2_reg_upd_in_sof,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_fs2_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};

static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
	},
	/* Bubble Applied */
	{
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
  3459. static int __cam_isp_ctx_apply_req_in_activated_state(
  3460. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *active_req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_ctx_req *active_req_isp;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_config_args cfg = {0};

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	if (apply->re_apply)
		if (apply->request_id <= ctx_isp->last_applied_req_id) {
			CAM_INFO_RATE_LIMIT(CAM_ISP,
				"ctx_id:%d Trying to reapply the same request %llu again",
				ctx->ctx_id,
				apply->request_id);
			return 0;
		}

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d No available request for Apply id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has an issue, requests can queue up in the
	 * pipeline. In this case we should reject any additional request.
	 * The maximum number of requests allowed to be outstanding is 2.
	 */
	if (atomic_read(&ctx_isp->process_bubble)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
			ctx->ctx_id,
			apply->request_id);
		rc = -EAGAIN;
		goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip of the pending list;
	 * if not, we are in the middle of error handling and need to reject
	 * this apply.
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Invalid Request Id asking %llu existing %llu",
			ctx->ctx_id,
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
		req->request_id,
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
		ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (ctx_isp->active_req_cnt >= 2) {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
			req->request_id,
			ctx_isp->active_req_cnt,
			ctx->ctx_id);

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->active_req_list))
			active_req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
		else
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"WARNING: should not happen (cnt = %d) but active_list empty",
				ctx_isp->active_req_cnt);
		spin_unlock_bh(&ctx->lock);

		if (active_req) {
			active_req_isp =
				(struct cam_isp_ctx_req *) active_req->req_priv;
			__cam_isp_ctx_handle_buf_done_fail_log(
				active_req->request_id, active_req_isp,
				ctx_isp->isp_device_type);
		}

		rc = -EFAULT;
		goto end;
	}

	req_isp->bubble_report = apply->report_if_bubble;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;
	cfg.reapply_type = req_isp->reapply_type;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	atomic_set(&ctx_isp->apply_in_progress, 1);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		ctx_isp->last_applied_req_id = apply->request_id;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
			__cam_isp_ctx_substate_val_to_type(next_state),
			ctx_isp->last_applied_req_id);
		spin_unlock_bh(&ctx->lock);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_APPLY, req);
	} else if (rc == -EALREADY) {
		spin_lock_bh(&ctx->lock);
		req_isp->bubble_detected = true;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		spin_unlock_bh(&ctx->lock);
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Can not apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}
	atomic_set(&ctx_isp->apply_in_progress, 0);
end:
	return rc;
}
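
/*
 * Substate-specific apply handlers. Each one is a thin wrapper around
 * __cam_isp_ctx_apply_req_in_activated_state() that selects the substate
 * to transition to once the configuration is committed: APPLIED when
 * applying from SOF or EPOCH, BUBBLE_APPLIED when re-applying in bubble.
 */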
static int __cam_isp_ctx_apply_req_in_sof(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_epoch(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static int __cam_isp_ctx_apply_req_in_bubble(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
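
/*
 * Invoked by CRM on a frame skip: issues the internal
 * CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG command so the hw mgr programs its
 * default settings for the skipped frame instead of a request payload.
 */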
static int __cam_isp_ctx_apply_default_req_settings(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type =
		CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to apply default settings rc %d", rc);
	else
		CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);

	return rc;
}
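
/*
 * User-dump callbacks passed to cam_common_user_dump_helper(). Each one
 * serializes its dump_struct argument into the caller-provided buffer at
 * addr_ptr and returns the advanced write pointer.
 */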
static void *cam_isp_ctx_user_dump_req_list(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct list_head *head = NULL;
	uint64_t *addr;
	struct cam_ctx_request *req, *req_temp;

	head = (struct list_head *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	if (!list_empty(head)) {
		list_for_each_entry_safe(req, req_temp, head, list) {
			*addr++ = req->request_id;
		}
	}

	return addr;
}

static void *cam_isp_ctx_user_dump_active_requests(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_ctx_request *req;

	req = (struct cam_ctx_request *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;

	return addr;
}
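
/*
 * Writes the request ids on the pending, wait (applied) and active lists
 * into the dump buffer, followed by one entry per output fence of every
 * active request; the resource type, image buffer address and sync id are
 * folded into the per-entry header string.
 */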
static int __cam_isp_ctx_dump_req_info(
	struct cam_context *ctx,
	struct cam_ctx_request *req,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	uint32_t min_len;
	size_t remain_len;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req_temp;

	if (!req || !ctx || !dump_args) {
		CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK",
			req, ctx, dump_args);
		return -EINVAL;
	}
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;

	if (dump_args->buf_len <= dump_args->offset) {
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			dump_args->buf_len, dump_args->offset);
		return -ENOSPC;
	}

	remain_len = dump_args->buf_len - dump_args->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
		 req_isp->num_fence_map_out *
		 sizeof(uint64_t));

	if (remain_len < min_len) {
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	/* Dump pending request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->pending_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_PENDING_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Pending request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump applied request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->wait_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_APPLIED_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Applied request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->active_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_ACTIVE_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Active request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request fences */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				rc = cam_common_user_dump_helper(dump_args,
					cam_isp_ctx_user_dump_active_requests,
					req, sizeof(uint64_t),
					"ISP_OUT_FENCE_REQUEST_ACTIVE.%s.%u.%d:",
					__cam_isp_ife_sfe_resource_handle_id_to_type(
						req_isp->fence_map_out[i].resource_handle),
					&(req_isp->fence_map_out[i].image_buf_addr),
					req_isp->fence_map_out[i].sync_id);
				if (rc) {
					CAM_ERR(CAM_ISP,
						"CAM_ISP_CONTEXT DUMP_REQ_INFO: Dump failed, rc: %d",
						rc);
					return rc;
				}
			}
		}
	}

	return rc;
}
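
/*
 * Dumps request timing as five 64-bit words: the request id, the APPLY
 * event timestamp (seconds, microseconds) and the current time (seconds,
 * microseconds).
 */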
static void *cam_isp_ctx_user_dump_timer(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	uint64_t *addr;
	ktime_t cur_time;

	req = (struct cam_ctx_request *)dump_struct;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	cur_time = ktime_get();

	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_sec;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_nsec / NSEC_PER_USEC;
	*addr++ = ktime_to_timespec64(cur_time).tv_sec;
	*addr++ = ktime_to_timespec64(cur_time).tv_nsec / NSEC_PER_USEC;

	return addr;
}

static void *cam_isp_ctx_user_dump_stream_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_context *ctx = NULL;
	struct cam_ife_hw_mgr_ctx *hw_mgr_ctx = NULL;
	struct cam_isp_hw_mgr_res *hw_mgr_res = NULL;
	struct cam_isp_resource_node *hw_res = NULL;
	int hw_idx[CAM_ISP_HW_SPLIT_MAX] = { -1, -1 };
	int sfe_hw_idx[CAM_ISP_HW_SPLIT_MAX] = { -1, -1 };
	int32_t *addr;
	int i;

	ctx = (struct cam_context *)dump_struct;
	hw_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)ctx->ctxt_to_hw_map;

	if (!list_empty(&hw_mgr_ctx->res_list_ife_src)) {
		hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_ife_src,
			struct cam_isp_hw_mgr_res, list);

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			hw_res = hw_mgr_res->hw_res[i];
			if (hw_res && hw_res->hw_intf)
				hw_idx[i] = hw_res->hw_intf->hw_idx;
		}
	}

	if (!list_empty(&hw_mgr_ctx->res_list_sfe_src)) {
		hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_sfe_src,
			struct cam_isp_hw_mgr_res, list);

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			hw_res = hw_mgr_res->hw_res[i];
			if (hw_res && hw_res->hw_intf)
				sfe_hw_idx[i] = hw_res->hw_intf->hw_idx;
		}
	}

	addr = (int32_t *)addr_ptr;

	*addr++ = ctx->ctx_id;
	*addr++ = ctx->dev_hdl;
	*addr++ = ctx->link_hdl;
	*addr++ = hw_idx[CAM_ISP_HW_SPLIT_LEFT];
	*addr++ = sfe_hw_idx[CAM_ISP_HW_SPLIT_LEFT];
	*addr++ = hw_mgr_ctx->flags.is_sfe_shdr;

	return addr;
}
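
/*
 * Top-level dump entry: looks up the request in the active or wait list,
 * then emits timing, stream info and the event record. If the request was
 * applied less than CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD microseconds ago
 * it is considered healthy and only the event record is dumped; otherwise
 * the state monitor array, request info and the hw mgr's CSID/VFE/SFE
 * dumps follow.
 */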
static int __cam_isp_ctx_dump_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_dump_info *dump_info)
{
	int rc = 0;
	bool dump_only_event_record = false;
	size_t buf_len;
	size_t remain_len;
	ktime_t cur_time;
	uint32_t min_len;
	uint64_t diff;
	uintptr_t cpu_addr;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_ctx_request *req_temp;
	struct cam_hw_dump_args ife_dump_args;
	struct cam_common_hw_dump_args dump_args;

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
				dump_info->req_id);
			goto hw_dump;
		}
	}
	goto end;
hw_dump:
	rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
		&cpu_addr, &buf_len);
	if (rc) {
		CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
			dump_info->buf_handle, rc);
		goto end;
	}
	if (buf_len <= dump_info->offset) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			buf_len, dump_info->offset);
		return -ENOSPC;
	}

	remain_len = buf_len - dump_info->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

	if (remain_len < min_len) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	cur_time = ktime_get();
	/* Elapsed time since this request was applied, in microseconds */
	diff = ktime_us_delta(cur_time,
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
	if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
		CAM_INFO(CAM_ISP, "req %lld found no error",
			req->request_id);
		dump_only_event_record = true;
	}

	dump_args.req_id = dump_info->req_id;
	dump_args.cpu_addr = cpu_addr;
	dump_args.buf_len = buf_len;
	dump_args.offset = dump_info->offset;
	dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;

	/* Dump time info */
	rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_timer,
		req, sizeof(uint64_t), "ISP_CTX_DUMP:");
	if (rc) {
		CAM_ERR(CAM_ISP, "Time dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	/* Dump stream info */
	ctx->ctxt_to_hw_map = ctx_isp->hw_ctx;
	rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_stream_info,
		ctx, sizeof(int32_t), "ISP_STREAM_INFO:");
	if (rc) {
		CAM_ERR(CAM_ISP, "Stream info dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	/* Dump event record */
	rc = __cam_isp_ctx_dump_event_record(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Event record dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	if (dump_only_event_record)
		goto end;

	/* Dump state monitor array */
	rc = __cam_isp_ctx_user_dump_state_monitor_array(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump event fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}

	/* Dump request info */
	rc = __cam_isp_ctx_dump_req_info(ctx, req, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump Req info fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	spin_unlock_bh(&ctx->lock);

	/* Dump CSID, VFE, and SFE info */
	dump_info->offset = dump_args.offset;
	if (ctx->hw_mgr_intf->hw_dump) {
		ife_dump_args.offset = dump_args.offset;
		ife_dump_args.request_id = dump_info->req_id;
		ife_dump_args.buf_handle = dump_info->buf_handle;
		ife_dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_dump(
			ctx->hw_mgr_intf->hw_mgr_priv,
			&ife_dump_args);
		dump_info->offset = ife_dump_args.offset;
	}
	return rc;
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}

static int __cam_isp_ctx_flush_req_in_flushed_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	CAM_INFO(CAM_ISP, "Flush (type %d) in flushed state req id %lld ctx_id:%d",
		flush_req->type, flush_req->req_id, ctx->ctx_id);
	if (flush_req->req_id > ctx->last_flush_req)
		ctx->last_flush_req = flush_req->req_id;

	return 0;
}
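
/*
 * Core flush helper, called with ctx->lock held. Moves the targeted
 * requests from req_list onto a local flush_list (all of them for
 * FLUSH_ALL, the single matching id for CANCEL_REQ), then signals every
 * output fence with CAM_SYNC_STATE_SIGNALED_CANCEL and recycles the
 * requests onto the free list.
 */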
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc, tmp = 0;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp;
	struct list_head flush_list;
	struct cam_isp_context *ctx_isp = NULL;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_ISP, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_INFO(CAM_ISP, "no request to cancel (last applied:%lld cancel:%lld)",
				ctx_isp->last_applied_req_id, flush_req->req_id);
			return -EINVAL;
		} else
			return 0;
	}

	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				__cam_isp_ctx_update_state_monitor_array(
					ctx_isp,
					CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
					req->request_id);
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
	}

	if (list_empty(&flush_list)) {
		/*
		 * The req may not have been sent to KMD if UMD already
		 * skipped it in the CSL layer.
		 */
		CAM_INFO(CAM_ISP,
			"flush list is empty, flush type %d for req %llu",
			flush_req->type, flush_req->req_id);
		return 0;
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
					req->request_id,
					req_isp->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_FLUSH);
				if (rc) {
					tmp = req_isp->fence_map_out[i].sync_id;
					CAM_ERR_RATE_LIMIT(CAM_ISP,
						"signal fence %d failed", tmp);
				}
				req_isp->fence_map_out[i].sync_id = -1;
			}
		}
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	return 0;
}
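
/*
 * Flush in the activated top state: the pending list is always flushed;
 * for FLUSH_ALL the context is additionally moved to the FLUSHED state,
 * the hardware is stopped and reset, and the wait and active lists are
 * flushed as well.
 */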
static int __cam_isp_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_hw_reset_args reset_args;
	struct cam_req_mgr_timer_notify timer;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "Flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		if (ctx->state <= CAM_CTX_READY) {
			ctx->state = CAM_CTX_ACQUIRED;
			goto end;
		}

		spin_lock_bh(&ctx->lock);
		ctx->state = CAM_CTX_FLUSHED;
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
		spin_unlock_bh(&ctx->lock);

		CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
			flush_req->req_id, ctx->ctx_id);
		ctx->last_flush_req = flush_req->req_id;

		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx);

		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = true;
		stop_isp.is_internal_stop = false;
		stop_args.args = (void *)&stop_isp;
		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
				rc);

		CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
		CAM_DBG(CAM_ISP, "Flush wait and active lists");

		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
			timer.link_hdl = ctx->link_hdl;
			timer.dev_hdl = ctx->dev_hdl;
			timer.state = false;
			ctx->ctx_crm_intf->notify_timer(&timer);
		}

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->wait_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
				flush_req);
		if (!list_empty(&ctx->active_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
				flush_req);
		ctx_isp->active_req_cnt = 0;
		spin_unlock_bh(&ctx->lock);

		reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
			&reset_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);

		ctx_isp->init_received = false;
	}

end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	return rc;
}

static int __cam_isp_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in pending req list, change state to acquire */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}
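
/*
 * Per-substate CRM ops for the activated state machines. apply_req is
 * only wired up in SOF, EPOCH and BUBBLE; the remaining substates reject
 * an apply by leaving crm_ops empty.
 */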
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};

static struct cam_ctx_ops
	cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
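
/*
 * RDI-only handlers below: these contexts do not use the EPOCH event, so
 * SOF and reg-update drive the substate machine directly.
 */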
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	/*
	 * Notify reqmgr with the SOF signal. Note that, due to scheduling
	 * delay, two active requests may already be in the active queue
	 * while we try to do the notification. In that case we skip the
	 * current notification, which helps the state machine catch up on
	 * the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		/*
		 * It's possible for rup done to be processed before
		 * SOF; check for the first active request shutter here.
		 */
		if (!list_empty(&ctx->active_req_list)) {
			struct cam_ctx_request *req = NULL;

			req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
			}
		}
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
	}

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return rc;
}

static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return 0;
}

static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	/*
	 * SOF in the bubble-applied state means the reg update was not
	 * received. Before incrementing the frame id and overriding the
	 * timestamp value, send the previous SOF timestamp that was
	 * captured in the applied state.
	 */
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req in epoch, this is an error
		 * case. The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as an empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/* change the state to bubble, as reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
end:
	return 0;
}

static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t request_id = 0;
	uint64_t last_cdm_done_req = 0;
	int rc = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP, "No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			return -EINVAL;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %d",
				ctx_isp->frame_id,
				ctx_isp->sof_timestamp_val,
				ctx->ctx_id);
			goto end;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu ctx_id: %d",
				last_cdm_done_req, ctx->ctx_id);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(ctx_isp, req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						CAM_SYNC_ISP_EVENT_BUBBLE);
					__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
				goto end;
			} else {
				CAM_WARN(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
			goto end;
		}
	}

	/*
	 * Signal all active requests with error and move them all to the
	 * free list.
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
	}

end:
	/* notify reqmgr with sof signal */
	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	/*
	 * This is an idle frame without any applied request id; send
	 * request id zero.
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
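
/*
 * A reg-update received while already in the bubble substate is only
 * logged; the bubble request remains on the active list until its buf
 * done is handled.
 */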
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Guard against an empty active list before peeking at its head */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "RUP in bubble with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %llu",
		req->request_id);
	return 0;
}

static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	/* notify reqmgr with sof signal */
	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto error;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	request_id =
		(req_isp->hw_update_data.packet_opcode_type ==
			CAM_ISP_PACKET_INIT_DEV) ? 0 : req->request_id;

	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_ISP,
			"move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
		/* if packet has buffers, set correct request id */
		request_id = req->request_id;
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active req %lld to free list(cnt=%d)",
			req->request_id, ctx_isp->active_req_cnt);
	}

	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	if (request_id)
		ctx_isp->reported_req_id = request_id;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, req);
	return 0;
error:
	/* Send SOF event as an idle frame */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, NULL);

	/*
	 * There is no request in the pending list; move the substate
	 * machine to the SOF substate.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}

static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_rdi_only_activated_state_machine_irq
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			__cam_isp_ctx_reg_upd_in_sof,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_applied_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			NULL,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED ie PRE_BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
	},
	/* HALT */
	{
	},
};

static int __cam_isp_ctx_rdi_only_apply_req_top_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Apply failed in Substate[%s], rc %d",
			ctx->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}

static struct cam_ctx_ops
	cam_isp_ctx_rdi_only_activated_state_machine
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* BUBBLE APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};

static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (!ctx_isp->offline_context) {
		CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
		return -EINVAL;
	}

	flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ? CAM_REQ_MGR_FLUSH_TYPE_ALL :
		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
	flush_req.req_id = cmd->req_id;

	CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%llu)", flush_req.type, flush_req.req_id);

	switch (ctx->state) {
	case CAM_CTX_ACQUIRED:
	case CAM_CTX_ACTIVATED:
		rc = __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
		break;
	case CAM_CTX_READY:
		rc = __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
		break;
	default:
		CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
		return -EINVAL;
	}

	/* For a full flush, also drain the offline context's work queue */
	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
		cam_req_mgr_workq_flush(ctx_isp->workq);

	return rc;
}
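
/*
 * Frees the per-request hw update and in/out fence map tables allocated
 * by __cam_isp_ctx_allocate_mem_hw_entries() and resets the max counts.
 */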
static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
{
	int i;

	if (ctx->out_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->out_map_entries[i]);
			ctx->out_map_entries[i] = NULL;
		}
		kfree(ctx->out_map_entries);
		ctx->out_map_entries = NULL;
	}

	if (ctx->in_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->in_map_entries[i]);
			ctx->in_map_entries[i] = NULL;
		}
		kfree(ctx->in_map_entries);
		ctx->in_map_entries = NULL;
	}

	if (ctx->hw_update_entry) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->hw_update_entry[i]);
			ctx->hw_update_entry[i] = NULL;
		}
		kfree(ctx->hw_update_entry);
		ctx->hw_update_entry = NULL;
	}

	ctx->max_out_map_entries = 0;
	ctx->max_in_map_entries = 0;
	ctx->max_hw_update_entries = 0;
}

static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;
	int i;

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	} else {
		CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
	}

	ctx->last_flush_req = 0;
	ctx_isp->custom_enabled = false;
	ctx_isp->use_frame_header_ts = false;
	ctx_isp->use_default_apply = false;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->support_consumed_addr = false;
	ctx_isp->aeb_enabled = false;
	ctx_isp->do_internal_recovery = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_WARN(CAM_ISP, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);
	cam_req_mgr_workq_destroy(&ctx_isp->workq);
	ctx->state = CAM_CTX_ACQUIRED;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}

/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "releasing hw");
		__cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
	}

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	ctx->session_hdl = -1;
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->offline_context = false;
	ctx_isp->rdi_only_context = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->v4l2_event_sub_ids = 0;

	atomic64_set(&ctx_isp->state_monitor_head, -1);
	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still add some sanity check code here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_ISP, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);
	ctx->state = CAM_CTX_AVAILABLE;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);
	return rc;
}
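
/*
 * CONFIG_DEV handler: pulls a free request object, parses the user packet,
 * has the hw mgr prepare the hw update entries and fence maps, and takes a
 * reference on every output fence. INIT packets are enqueued for stream
 * start; update packets are added to CRM (or queued directly for offline
 * contexts).
 */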
  4911. static int __cam_isp_ctx_config_dev_in_top_state(
  4912. struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
  4913. {
  4914. int rc = 0, i;
  4915. struct cam_ctx_request *req = NULL;
  4916. struct cam_isp_ctx_req *req_isp;
  4917. struct cam_packet *packet;
  4918. size_t remain_len = 0;
  4919. struct cam_hw_prepare_update_args cfg = {0};
  4920. struct cam_req_mgr_add_request add_req;
  4921. struct cam_isp_context *ctx_isp =
  4922. (struct cam_isp_context *) ctx->ctx_priv;
  4923. struct cam_hw_cmd_args hw_cmd_args;
  4924. struct cam_isp_hw_cmd_args isp_hw_cmd_args;
  4925. uint32_t packet_opcode = 0;
  4926. CAM_DBG(CAM_ISP, "get free request object......");
  4927. /* get free request */
  4928. spin_lock_bh(&ctx->lock);
  4929. if (!list_empty(&ctx->free_req_list)) {
  4930. req = list_first_entry(&ctx->free_req_list,
  4931. struct cam_ctx_request, list);
  4932. list_del_init(&req->list);
  4933. }
  4934. spin_unlock_bh(&ctx->lock);
  4935. if (!req) {
  4936. CAM_ERR(CAM_ISP, "No more request obj free");
  4937. return -ENOMEM;
  4938. }
  4939. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  4940. remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet);
  4941. if (IS_ERR(packet)) {
  4942. rc = PTR_ERR(packet);
  4943. goto free_req;
  4944. }
  4945. /* Query the packet opcode */
  4946. hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
  4947. hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
  4948. isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
  4949. isp_hw_cmd_args.cmd_data = (void *)packet;
  4950. hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
  4951. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
  4952. &hw_cmd_args);
  4953. if (rc) {
  4954. CAM_ERR(CAM_ISP, "HW command failed");
  4955. goto free_req;
  4956. }
  4957. packet_opcode = isp_hw_cmd_args.u.packet_op_code;
  4958. if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
  4959. && (packet->header.request_id <= ctx->last_flush_req)) {
  4960. CAM_INFO(CAM_ISP,
  4961. "request %lld has been flushed, reject packet",
  4962. packet->header.request_id);
  4963. rc = -EBADR;
  4964. goto free_req;
  4965. } else if ((packet_opcode == CAM_ISP_PACKET_INIT_DEV)
  4966. && (packet->header.request_id <= ctx->last_flush_req)
  4967. && ctx->last_flush_req && packet->header.request_id) {
  4968. CAM_WARN(CAM_ISP,
  4969. "last flushed req is %lld, config dev(init) for req %lld",
  4970. ctx->last_flush_req, packet->header.request_id);
  4971. rc = -EBADR;
  4972. goto free_req;
  4973. }
  4974. cfg.packet = packet;
  4975. cfg.remain_len = remain_len;
  4976. cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
  4977. cfg.max_hw_update_entries = ctx->max_hw_update_entries;
  4978. cfg.hw_update_entries = req_isp->cfg;
  4979. cfg.max_out_map_entries = ctx->max_out_map_entries;
	cfg.max_in_map_entries = ctx->max_in_map_entries;
	cfg.out_map_entries = req_isp->fence_map_out;
	cfg.in_map_entries = req_isp->fence_map_in;
	cfg.priv = &req_isp->hw_update_data;
	cfg.pf_data = &(req->pf_data);
	cfg.num_out_map_entries = 0;
	cfg.num_in_map_entries = 0;
	memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_isp->num_cfg = cfg.num_hw_update_entries;
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;
	req_isp->hw_update_data.packet = packet;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
				req_isp->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_ISP,
		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d",
		packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
		req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	if (req_isp->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
			ctx_isp->init_received = true;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
				ctx->state);
		}
	} else {
		if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
				req->request_id, ctx->state);
			goto put_ref;
		}

		if (ctx_isp->offline_context) {
			__cam_isp_ctx_enqueue_request_in_order(ctx, req);
		} else if (ctx->ctx_crm_intf->add_req) {
			memset(&add_req, 0, sizeof(add_req));
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
					req->request_id);
			} else {
				__cam_isp_ctx_enqueue_request_in_order(
					ctx, req);
			}
		} else {
			CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
			rc = -ENODEV;
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_REQ,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch)) {
		__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);
	}

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
				req_isp->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}
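
/*
 * Allocate the per-request HW update and fence map tables for a context.
 * Table sizes come from the acquire params: when SFE is enabled the
 * resource counts of both param_list entries are combined and the larger
 * SFE config maximum is used. Each entry of the free request list is then
 * pointed at its row in these tables. On a mid-allocation failure the
 * partially built tables are released via __cam_isp_ctx_free_mem_hw_entries().
 */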
static int __cam_isp_ctx_allocate_mem_hw_entries(
	struct cam_context *ctx,
	struct cam_hw_acquire_args *param)
{
	int rc = 0, i;
	uint32_t max_res = 0;
	uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
	struct cam_ctx_request *req;
	struct cam_ctx_request *temp_req;
	struct cam_isp_ctx_req *req_isp;

	if (!param->op_params.param_list[0])
		max_res = CAM_ISP_CTX_RES_MAX;
	else {
		max_res = param->op_params.param_list[0];
		if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
			max_res += param->op_params.param_list[1];
			max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
		}
	}

	ctx->max_in_map_entries = max_res;
	ctx->max_out_map_entries = max_res;
	ctx->max_hw_update_entries = max_hw_upd_entries;

	CAM_DBG(CAM_ISP,
		"Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
		max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN));

	ctx->hw_update_entry = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_update_entry *),
		GFP_KERNEL);
	if (!ctx->hw_update_entry) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory",
			ctx->dev_name, ctx->ctx_id);
		return -ENOMEM;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
			sizeof(struct cam_hw_update_entry), GFP_KERNEL);
		if (!ctx->hw_update_entry[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry: %u",
				ctx->dev_name, ctx->ctx_id, i);
			/* free partially allocated tables instead of leaking them */
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->in_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->in_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->in_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->out_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->out_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->out_map_entries[i] = kcalloc(ctx->max_out_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->out_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	list_for_each_entry_safe(req, temp_req,
		&ctx->free_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		req_isp->cfg = ctx->hw_update_entry[req->index];
		req_isp->fence_map_in = ctx->in_map_entries[req->index];
		req_isp->fence_map_out = ctx->out_map_entries[req->index];
	}

	return rc;

end:
	__cam_isp_ctx_free_mem_hw_entries(ctx);

	return rc;
}
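
/*
 * Legacy ACQUIRE_DEV path: copies the ISP resource list from user space,
 * reserves HW through the HW manager, selects the substate machine that
 * matches the reported context type (RDI-only, FS2, offline, or PIX), and
 * finally creates the device handle. When num_resources equals
 * CAM_API_COMPAT_CONSTANT the HW acquire is deferred (split acquire, to be
 * completed later via ACQUIRE_HW) and only the device handle is created.
 */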
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_acquire_args param;
	struct cam_isp_resource *isp_res = NULL;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed();

	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
		ctx_isp->split_acquire = true;
		CAM_DBG(CAM_ISP, "Acquire dev handle");
		goto get_dev_handle;
	}

	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP, "Too many resources in the acquire");
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	isp_res = kzalloc(
		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
	if (!isp_res) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy %d resources from user",
		cmd->num_resources);
	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
		sizeof(*isp_res)*cmd->num_resources)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = cmd->num_resources;
	param.acquire_info = (uintptr_t) isp_res;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources;
		 * assign the RDI-only state machine.
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx_isp->split_acquire = false;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	kfree(isp_res);
	isp_res = NULL;

get_dev_handle:
	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags, so keep them false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;
	req_hdl_param.dev_id = CAM_ISP;

	CAM_DBG(CAM_ISP, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_ISP, "Can not create device handle");
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
		cmd->session_handle, cmd->num_resources, ctx->ctx_id);

	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx_isp->hw_acquired)
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(isp_res);
end:
	return rc;
}
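
/*
 * ACQUIRE_HW v1: the resource blob is opaque to this layer and is handed
 * to the HW manager as acquire_info. Unlike the v2 variant below, the
 * per-request tables are allocated before the HW acquire call, and no
 * acquired-HW information is reported back to user space.
 */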
static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
	void *args)
{
	int rc = 0;
	int i;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		/* return an error instead of falling through with rc == 0 */
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
		&param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources;
		 * assign the RDI-only state machine.
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
static void cam_req_mgr_process_workq_offline_ife_worker(struct work_struct *w)
{
	cam_req_mgr_process_workq(w);
}
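
/*
 * ACQUIRE_HW v2: in addition to the v1 flow, this variant reports the
 * acquired HW ids/paths back to user space, latches feature flags from
 * param.op_flags (custom HW, frame-header timestamps, default apply,
 * consumed address, AEB), and creates a dedicated IRQ workq for offline
 * IFE contexts. Note that here the per-request tables are allocated only
 * after hw_acquire succeeds, since the sizes depend on the acquire result.
 */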
static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
	void *args)
{
	int rc = 0, i, j;
	struct cam_acquire_hw_cmd_v2 *cmd =
		(struct cam_acquire_hw_cmd_v2 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		/* return an error instead of falling through with rc == 0 */
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_hw;
	}

	/*
	 * Set feature flag if applicable
	 * custom hw is supported only on v2
	 */
	ctx_isp->custom_enabled =
		(param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
	ctx_isp->use_frame_header_ts =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
	ctx_isp->use_default_apply =
		(param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
	ctx_isp->aeb_enabled =
		(param.op_flags & CAM_IFE_CTX_AEB_EN);

	if ((ctx_isp->aeb_enabled) && (!isp_ctx_debug.disable_internal_recovery))
		ctx_isp->do_internal_recovery = true;

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (param.valid_acquired_hw) {
		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			cmd->hw_info.acquired_hw_id[i] =
				param.acquired_hw_id[i];

		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
				cmd->hw_info.acquired_hw_path[i][j] =
					param.acquired_hw_path[i][j];
	}
	cmd->hw_info.valid_acquired_hw = param.valid_acquired_hw;

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources;
		 * assign the RDI-only state machine.
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
		ctx_isp->offline_context = true;

		rc = cam_req_mgr_workq_create("offline_ife", 20,
			&ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
			cam_req_mgr_process_workq_offline_ife_worker);
		if (rc)
			CAM_ERR(CAM_ISP,
				"Failed to create workq for offline IFE rc:%d",
				rc);
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
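
/*
 * Dispatch ACQUIRE_HW to the v1 or v2 handler based on the api version
 * carried in the first 32-bit word of the command payload.
 */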
static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
	void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_ISP, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
	else if (api_version == 2)
		rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
	else
		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

	return rc;
}
static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		return -EINVAL;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
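
/*
 * Config in FLUSHED state: after validating and queueing the packet, if an
 * INIT packet was received earlier the HW is resumed and restarted so that
 * streaming continues without a full stop/start cycle from user space.
 */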
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_start_stop_dev_cmd start_cmd;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		rc = -EINVAL;
		goto end;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (rc)
		goto end;

	if (!ctx_isp->init_received) {
		CAM_WARN(CAM_ISP,
			"Received update packet in flushed state, skip start");
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
		goto end;
	}

	start_cmd.dev_handle = cmd->dev_handle;
	start_cmd.session_handle = cmd->session_handle;
	rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to re-start HW after flush rc: %d", rc);
	else
		CAM_INFO(CAM_ISP,
			"Received init after flush. Re-start HW complete in ctx:%d",
			ctx->ctx_id);

end:
	CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
		ctx_isp->substate_activated);
	return rc;
}
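
/*
 * CRM link callback: records the link handle and CRM callbacks and
 * subscribes to SOF and EOF triggers. The context moves to READY only if
 * the init configuration has already been received.
 */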
static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!link) {
		CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u",
			link, ctx->ctx_id);
		return -EINVAL;
	}

	if (!link->crm_cb) {
		CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u",
			link->crm_cb, ctx->ctx_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Enter.........");
	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_isp->subscribe_event =
		CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
	ctx_isp->trigger_id = link->trigger_id;

	/* change state only if we had the init config */
	if (ctx_isp->init_received) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->trigger_id = -1;

	return rc;
}

static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	int rc = 0;

	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
	dev_info->trigger_on = true;

	return rc;
}
static inline void __cam_isp_context_reset_ctx_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	ctx_isp->frame_id = 0;
	ctx_isp->sof_timestamp_val = 0;
	ctx_isp->boot_timestamp = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->recovery_req_id = 0;
	ctx_isp->aeb_error_cnt = 0;
}
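
/*
 * START_DEV in READY state: builds the initial HW config from the first
 * pending request and starts the HW. The request is moved off the pending
 * list and the context state changes to ACTIVATED before hw_start is
 * called, since IRQs may fire before hw_start returns (see the CSID TPG
 * note below); both are rolled back if hw_start fails.
 */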
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_isp_start_args start_isp;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_ISP, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.is_internal_start = false;

	ctx_isp->last_applied_req_id = req->request_id;

	if (ctx->state == CAM_CTX_FLUSHED)
		start_isp.start_only = true;
	else
		start_isp.start_only = false;

	__cam_isp_context_reset_ctx_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED :
		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
		CAM_ISP_CTX_ACTIVATED_SOF;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * In case of CSID TPG we might receive SOF and RUP IRQs
	 * before hw_mgr_intf->hw_start has returned. So move
	 * req out of pending list before hw_start and add it
	 * back to pending list if hw_start fails.
	 */
	list_del_init(&req->list);

	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->free_req_list);
		atomic_set(&ctx_isp->rxd_epoch, 1);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to free list(cnt: %d) offline ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to wait list(cnt: %d) ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx_isp->offline_context);
	}

	/*
	 * This is the only place the state changes before calling into the
	 * HW: the hardware tasklet runs at higher priority, so IRQ handling
	 * can come in before hw_start returns.
	 */
	ctx->state = CAM_CTX_ACTIVATED;
	trace_cam_context_state("ISP", ctx);
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		/* HW failure; user needs to clean up the resources */
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		if ((rc == -ETIMEDOUT) &&
			(isp_ctx_debug.enable_cdm_cmd_buff_dump))
			rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);
		trace_cam_context_state("ISP", ctx);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		goto end;
	}
	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
	return rc;
}
static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}
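
/*
 * Common stop path: halts the substate machine, stops the HW, notifies CRM,
 * signals every fence in the pending/wait/active lists with a CANCEL
 * status, and resets the per-context bookkeeping. Called with
 * stop_cmd == NULL from release/unlink paths, in which case the link is
 * also torn down.
 */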
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
	int rc = 0;
	uint32_t i;
	struct cam_hw_stop_args stop;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;

	/* Mask off all the incoming hardware events */
	spin_lock_bh(&ctx->lock);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
	spin_unlock_bh(&ctx->lock);

	/* stop hw first */
	if (ctx_isp->hw_ctx) {
		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = false;
		stop_isp.is_internal_stop = false;
		stop.args = (void *) &stop_isp;
		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_stop) {
		struct cam_req_mgr_notify_stop notify;

		notify.link_hdl = ctx->link_hdl;
		CAM_DBG(CAM_ISP,
			"Notify CRM about device stop ctx %u link 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		ctx->ctx_crm_intf->notify_stop(&notify);
	} else if (!ctx_isp->offline_context)
		CAM_ERR(CAM_ISP, "cb not present");

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_HW_STOP);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
	}

	return rc;
}
static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
	ctx_isp->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}

static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);

	return rc;
}

static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);

	return rc;
}
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_handle_sof_freeze_evt(
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
	isp_hw_cmd_args.u.sof_irq_enable = 1;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
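
/*
 * Internal recovery: halt and stop the HW, then (unless skip_resume is
 * set) resume and restart it with the first pending request, reapplying IQ
 * settings. Recovery is attempted only when the active list is empty; the
 * caller is expected to retry after buf_done otherwise.
 */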
static int __cam_isp_ctx_reset_and_recover(
	bool skip_resume, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_isp_start_args start_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	spin_lock(&ctx->lock);
	if (ctx_isp->active_req_cnt) {
		spin_unlock(&ctx->lock);
		CAM_WARN(CAM_ISP,
			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
			ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		goto end;
	}

	if (ctx->state != CAM_CTX_ACTIVATED) {
		spin_unlock(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
			ctx->state, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		rc = -EINVAL;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* Cannot start with no request */
		spin_unlock(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}
	spin_unlock(&ctx->lock);

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP,
			"Invalid hw context pointer ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	/* Block all events till HW is resumed */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;

	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	req_isp->bubble_detected = false;

	CAM_INFO(CAM_ISP,
		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);

	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
	stop_isp.stop_only = true;
	stop_isp.is_internal_stop = true;
	stop_args.args = (void *)&stop_isp;
	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
		&stop_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
			rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	/* The API also allows streaming off without resuming, for fatal errors */
	if (skip_resume) {
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		CAM_INFO(CAM_ISP,
			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
			ctx->ctx_id, ctx_isp->last_applied_req_id,
			ctx_isp->recovery_req_id, ctx->link_hdl);
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
		goto end;
	}
	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.start_only = true;
	start_isp.is_internal_start = true;

	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;

	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	/* IQ applied for this request, on next trigger skip IQ cfg */
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	CAM_DBG(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
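
/*
 * CRM link event handler: maps link-level events (pause, resume, SOF
 * freeze, stalled) onto the corresponding internal HW commands. Link
 * errors are intentionally not handled here.
 */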
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	int rc = 0;

	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
		/* No handling */
		break;
	case CAM_REQ_MGR_LINK_EVT_PAUSE:
		rc = __cam_isp_ctx_link_pause(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME:
		rc = __cam_isp_ctx_link_resume(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
		rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_STALLED:
		if (ctx->state == CAM_CTX_ACTIVATED)
			rc = __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
		break;
	default:
		CAM_WARN(CAM_ISP,
			"Unsupported event type: 0x%x on ctx: %u",
			link_evt_data->evt_type, ctx->ctx_id);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_ISP,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);

	return rc;
}
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl);
	CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), apply->request_id);
	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.apply_req) {
		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply failed in active Substate[%s] rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);
	return rc;
}
static int __cam_isp_ctx_apply_default_settings(
	struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->use_default_apply)
		return 0;

	if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
		CAM_WARN(CAM_ISP,
			"Trigger: %u not subscribed for: %u",
			apply->trigger_point, ctx_isp->subscribe_event);
		return 0;
	}

	/* Allow apply default settings for IFE only at SOF */
	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
		return 0;

	if ((ctx_isp->aeb_enabled) && (atomic_read(&ctx_isp->internal_recovery_set)))
		return __cam_isp_ctx_reset_and_recover(false, ctx);

	CAM_DBG(CAM_ISP,
		"Enter: apply req in Substate:%d request_id:%lld ctx:%u on link:0x%x",
		ctx_isp->substate_activated, apply->request_id,
		ctx->ctx_id, ctx->link_hdl);

	ctx_ops = &ctx_isp->substate_machine[
		ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.notify_frame_skip) {
		rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated substate %d",
			ctx_isp->substate_activated);
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply default failed in active substate %d rc %d",
			ctx_isp->substate_activated, rc);
	return rc;
}
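
/*
 * Top-level IRQ entry for the ACTIVATED state: takes the context lock and
 * routes the event to the handler registered for the current substate,
 * optionally dumping the state monitor array when no handler exists.
 */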
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
		__cam_isp_ctx_get_event_ts(evt_id, evt_data));

	CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d, ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), evt_id,
		ctx->ctx_id);
	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
	if (irq_ops->irq_ops[evt_id]) {
		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CAM_DBG(CAM_ISP,
			"No handle function for Substate[%s], evt id %d, ctx:%d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), evt_id,
			ctx->ctx_id);
		if (isp_ctx_debug.enable_state_monitor_dump)
			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
	}

	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), ctx->ctx_id);

	spin_unlock(&ctx->lock);
	return rc;
}
/* top state machine */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_flushed_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_activated,
			.apply_req = __cam_isp_ctx_apply_req,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_settings,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.process_evt = __cam_isp_ctx_process_evt,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.recovery_ops = cam_isp_context_hw_recovery,
	},
};
static int cam_isp_context_hw_recovery(void *priv, void *data)
{
	struct cam_context *ctx = priv;
	int rc = -EPERM;

	if (ctx->hw_mgr_intf->hw_recovery)
		rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data);
	else
		CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery");

	return rc;
}
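
/*
 * IOMMU page-fault handler: walks the active, wait, and pending request
 * lists to find the buffer (and context) matching the faulting address,
 * then notifies user space if the fault belongs to this context. The
 * pending list is included because bubble recovery can move requests back
 * to it before the fault is handled (see the comment in the body below).
 */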
static int cam_isp_context_dump_requests(void *data,
	struct cam_smmu_pf_info *pf_info)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
	struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
	struct cam_req_mgr_message req_msg = {0};
	struct cam_isp_context *ctx_isp;
	uint32_t resource_type = 0;
	bool mem_found = false, ctx_found = false, send_error = false;
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *)ctx->ctx_priv;

	if (!isp_ctx) {
		CAM_ERR(CAM_ISP, "Invalid isp ctx");
		return -EINVAL;
	}

	CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Active List: req_id: %lld",
			req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	CAM_INFO(CAM_ISP, "Iterating over wait_list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Wait List: req_id: %lld", req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	/*
	 * In certain scenarios we observe both an overflow and an SMMU page
	 * fault for the same request. If the overflow is handled before the
	 * page fault, we must also traverse the pending request list: when
	 * bubble recovery is enabled on any request, that request and all
	 * subsequent ones are moved to the pending list while handling the
	 * overflow error.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	list_for_each_entry_safe(req, req_temp,
		&ctx->pending_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Pending List: req_id: %lld",
			req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}

	if (resource_type) {
		ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
		CAM_ERR(CAM_ISP,
			"Page fault on resource:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
			__cam_isp_resource_handle_id_to_type(
				ctx_isp->isp_device_type, resource_type),
			resource_type, ctx->ctx_id, ctx_isp->frame_id,
			ctx_isp->reported_req_id, ctx_isp->last_applied_req_id);
	}

	if (send_error) {
		CAM_INFO(CAM_ISP,
			"page fault notifying to umd ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
			ctx->ctx_id, ctx->session_hdl,
			ctx->dev_hdl, ctx->link_hdl);

		req_msg.session_hdl = ctx->session_hdl;
		req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
		req_msg.u.err_msg.error_type =
			CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT;
		req_msg.u.err_msg.link_hdl = ctx->link_hdl;
		req_msg.u.err_msg.request_id = 0;
		req_msg.u.err_msg.resource_size = 0x0;
		req_msg.u.err_msg.error_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR;

		if (cam_req_mgr_notify_message(&req_msg,
			V4L_EVENT_CAM_REQ_MGR_ERROR,
			V4L_EVENT_CAM_REQ_MGR_EVENT))
			CAM_ERR(CAM_ISP,
				"could not send page fault notification ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
				ctx->ctx_id, ctx->session_hdl,
				ctx->dev_hdl, ctx->link_hdl);
	}

	return rc;
}
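
/*
 * cam_isp_context_debug_register - create the "isp_ctx" debugfs controls.
 *
 * The three knobs created below can be toggled at runtime, for example (the
 * path assumes the default debugfs mount point and the driver's "camera"
 * root directory, neither of which is visible in this file):
 *
 *	echo 1 > /sys/kernel/debug/camera/isp_ctx/enable_state_monitor_dump
 */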
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("isp_ctx", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		return rc;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
	debugfs_create_bool("disable_internal_recovery", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery);

	return 0;
}
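
/*
 * cam_isp_context_init - one-time setup of an ISP context and its base
 * camera context.
 *
 * A minimal bring-up sketch (the isp_ctx/base_ctx storage and the interface
 * pointers are hypothetical placeholders, not taken from this file):
 *
 *	rc = cam_isp_context_init(&isp_ctx[i], &base_ctx[i], crm_node_intf,
 *		hw_intf, i, isp_device_type, img_iommu_hdl);
 *	if (rc)
 *		goto cleanup;
 *
 * Each cam_isp_context must later be torn down with cam_isp_context_deinit().
 */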
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -EINVAL;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));
	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* Camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX,
		img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* Link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* Initialize current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();

err:
	return rc;
}
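
/*
 * cam_isp_context_deinit - counterpart of cam_isp_context_init().
 *
 * Deinitializes the base camera context, warns if the context is not back
 * in the SOF substate, drops the cached debugfs dentry reference and zeroes
 * the structure so the slot can be reused.
 */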
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
				ctx->substate_activated));

	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}