// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"
#include "cam_ife_hw_mgr.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
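
/*
 * INC_HEAD() advances a ring-buffer head atomically and returns the
 * wrapped write index through @ret. atomic64_add_return() makes the
 * increment safe against concurrent writers without a lock, and
 * div_u64_rem() folds the monotonically increasing head value back
 * into [0, max_entries). The dump paths below read the raw head to
 * recover both the newest slot and how many entries were ever written.
 */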
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
		max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	void *pf_args);

static int cam_isp_context_hw_recovery(void *priv, void *data);

static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);

static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp);

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id);

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type);

static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state);

static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
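
/*
 * Record a context event (SUBMIT/APPLY/EPOCH/RUP/BUFDONE) in the
 * per-event ring buffer. SUBMIT and APPLY are request-driven and so
 * require a valid @req; the hardware-driven events may be logged with
 * a NULL request, in which case req_id is recorded as 0.
 */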
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}

	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}

static void *cam_isp_ctx_user_dump_events(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_isp_context_event_record *record;
	struct timespec64 ts;

	record = (struct cam_isp_context_event_record *)dump_struct;
	addr = (uint64_t *)addr_ptr;
	ts = ktime_to_timespec64(record->timestamp);
	*addr++ = record->req_id;
	*addr++ = ts.tv_sec;
	*addr++ = ts.tv_nsec / NSEC_PER_USEC;

	return addr;
}
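
/*
 * Walk each event ring buffer from its oldest valid entry and emit the
 * records into the caller-provided dump buffer through
 * cam_common_user_dump_helper(). Remaining space is validated up front
 * against the worst case (dump header plus num_entries *
 * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS words) so a truncated dump is
 * rejected with -ENOSPC rather than written partially.
 */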
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, j, rc = 0;
	int index;
	size_t remain_len;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t state_head;
	struct cam_isp_context_event_record *record;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}

		index = oldest_entry;

		if (dump_args->buf_len <= dump_args->offset) {
			CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
				dump_args->buf_len, dump_args->offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
			sizeof(uint64_t));
		remain_len = dump_args->buf_len - dump_args->offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}

		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];

			rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events,
				record, sizeof(uint64_t), "ISP_EVT_%s:",
				__cam_isp_evt_val_to_type(i));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"CAM_ISP_CONTEXT DUMP_EVENT_RECORD: Dump failed, rc: %d",
					rc);
				return rc;
			}

			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
	}

	return rc;
}
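
/*
 * Serialize one request into the mini-dump region: the fixed-size
 * request state first, then the variable-length fence map-out,
 * map-in and IO config tables, each guarded by an end_addr bounds
 * check. On overflow the function returns early, with *bytes_updated
 * reflecting only the portions that fit.
 */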
static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
	uint8_t *start_addr, uint8_t *end_addr,
	unsigned long *bytes_updated)
{
	struct cam_isp_ctx_req_mini_dump *req_md;
	struct cam_buf_io_cfg *io_cfg;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet = NULL;
	unsigned long bytes_required = 0;

	bytes_required = sizeof(*req_md);
	*bytes_updated = 0;
	if (start_addr + bytes_required > end_addr)
		return;

	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_md->num_acked = req_isp->num_acked;
	req_md->num_deferred_acks = req_isp->num_deferred_acks;
	req_md->bubble_report = req_isp->bubble_report;
	req_md->bubble_detected = req_isp->bubble_detected;
	req_md->reapply_type = req_isp->reapply_type;
	req_md->request_id = req->request_id;
	*bytes_updated += bytes_required;

	if (req_isp->num_fence_map_out) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_out;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_out = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
		req_md->num_fence_map_out = req_isp->num_fence_map_out;
		*bytes_updated += bytes_required;
	}

	if (req_isp->num_fence_map_in) {
		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
			req_isp->num_fence_map_in;
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		req_md->map_in = (struct cam_hw_fence_map_entry *)
			((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
		req_md->num_fence_map_in = req_isp->num_fence_map_in;
		*bytes_updated += bytes_required;
	}

	packet = req_isp->hw_update_data.packet;
	if (packet && packet->num_io_configs) {
		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
		if (start_addr + *bytes_updated + bytes_required > end_addr)
			return;

		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
			packet->io_configs_offset / 4);
		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
		memcpy(req_md->io_cfg, io_cfg, bytes_required);
		*bytes_updated += bytes_required;
		req_md->num_io_cfg = packet->num_io_configs;
	}
}
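
/*
 * Mini-dump callback, presumably registered with the context layer
 * elsewhere in this file. Snapshots the scalar ISP context state into
 * cam_isp_ctx_mini_dump_info, copies the event records, then appends
 * per-request mini dumps for the active, wait and pending lists until
 * the supplied region is exhausted.
 */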
static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
{
	struct cam_isp_ctx_mini_dump_info *md;
	struct cam_isp_context *ctx_isp;
	struct cam_context *ctx;
	struct cam_ctx_request *req, *req_temp;
	struct cam_hw_mini_dump_args *dump_args;
	uint8_t *start_addr;
	uint8_t *end_addr;
	unsigned long total_bytes = 0;
	unsigned long bytes_updated = 0;
	uint32_t i;

	if (!priv || !args) {
		CAM_ERR(CAM_ISP, "invalid params");
		return 0;
	}

	dump_args = (struct cam_hw_mini_dump_args *)args;
	if (dump_args->len < sizeof(*md)) {
		CAM_ERR(CAM_ISP,
			"Insufficient size received %lu required size: %zu",
			dump_args->len, sizeof(*md));
		return 0;
	}

	ctx = (struct cam_context *)priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	start_addr = (uint8_t *)dump_args->start_addr;
	end_addr = start_addr + dump_args->len;
	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;

	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
	md->boot_timestamp = ctx_isp->boot_timestamp;
	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
	md->init_timestamp = ctx_isp->init_timestamp;
	md->frame_id = ctx_isp->frame_id;
	md->reported_req_id = ctx_isp->reported_req_id;
	md->last_applied_req_id = ctx_isp->last_applied_req_id;
	md->last_bufdone_err_apply_req_id =
		ctx_isp->last_bufdone_err_apply_req_id;
	md->frame_id_meta = ctx_isp->frame_id_meta;
	md->substate_activated = ctx_isp->substate_activated;
	md->ctx_id = ctx->ctx_id;
	md->subscribe_event = ctx_isp->subscribe_event;
	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
	md->isp_device_type = ctx_isp->isp_device_type;
	md->active_req_cnt = ctx_isp->active_req_cnt;
	md->trigger_id = ctx_isp->trigger_id;
	md->rdi_only_context = ctx_isp->rdi_only_context;
	md->offline_context = ctx_isp->offline_context;
	md->hw_acquired = ctx_isp->hw_acquired;
	md->init_received = ctx_isp->init_received;
	md->split_acquire = ctx_isp->split_acquire;
	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
	md->support_consumed_addr = ctx_isp->support_consumed_addr;
	md->use_default_apply = ctx_isp->use_default_apply;
	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		memcpy(md->event_record[i], ctx_isp->event_record[i],
			sizeof(struct cam_isp_context_event_record) *
			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
	}

	total_bytes += sizeof(*md);
	if (start_addr + total_bytes >= end_addr)
		goto end;

	if (!list_empty(&ctx->active_req_list)) {
		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->active_list[md->active_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes) >= end_addr)
				goto end;
		}
	}

	if (!list_empty(&ctx->wait_req_list)) {
		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->wait_list[md->wait_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes) >= end_addr)
				goto end;
		}
	}

	if (!list_empty(&ctx->pending_req_list)) {
		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
			(start_addr + total_bytes);
		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
			bytes_updated = 0;
			__cam_isp_ctx_req_mini_dump(req,
				(uint8_t *)&md->pending_list[md->pending_cnt++],
				end_addr, &bytes_updated);
			total_bytes += bytes_updated;
			if ((start_addr + total_bytes) >= end_addr)
				goto end;
		}
	}

end:
	dump_args->bytes_written = total_bytes;
	return 0;
}
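
/*
 * Append one entry to the state-transition ring buffer: the substate
 * active when @trigger_type fired, the current frame id, the request
 * id and a millisecond timestamp relative to context init. This
 * history is what __cam_isp_ctx_dump_state_monitor_array() prints on
 * error.
 */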
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}

static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF:
		return "SEC_EVT_SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH:
		return "SEC_EVT_EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP:
		return "OUT_OF_SYNC_FRAME_DROP";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
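
/*
 * Print the state-transition history to the kernel log at error
 * severity, oldest entry first. Intended for fatal-error paths, where
 * a trace of recent substate changes helps reconstruct how the
 * context arrived at the failure.
 */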
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}

static void *cam_isp_ctx_user_dump_state_monitor_array_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_isp_context_state_monitor *evt = NULL;
	uint64_t *addr;

	evt = (struct cam_isp_context_state_monitor *)dump_struct;
	addr = (uint64_t *)addr_ptr;
	*addr++ = evt->evt_time_stamp;
	*addr++ = evt->frame_id;
	*addr++ = evt->req_id;

	return addr;
}

static int __cam_isp_ctx_user_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	int index;
	uint32_t oldest_entry;
	uint32_t num_entries;
	uint64_t state_head;

	if (!dump_args || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			dump_args, ctx_isp);
		return -EINVAL;
	}

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return 0;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		rc = cam_common_user_dump_helper(dump_args,
			cam_isp_ctx_user_dump_state_monitor_array_info,
			&ctx_isp->cam_isp_ctx_state_monitor[index],
			sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:",
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
		if (rc) {
			CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: State monitor dump failed, rc: %d", rc);
			return rc;
		}

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}

	return rc;
}

static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}

static const char *__cam_isp_ctx_crm_trigger_point_to_string(
	int trigger_point)
{
	switch (trigger_point) {
	case CAM_TRIGGER_POINT_SOF:
		return "SOF";
	case CAM_TRIGGER_POINT_EOF:
		return "EOF";
	default:
		return "Invalid";
	}
}
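
/*
 * Notify CRM of a trigger point (SOF/EOF). The notification is
 * dropped silently (rc 0) when the trigger type is not in the
 * subscribed event mask or while internal recovery is in progress,
 * so callers may invoke this unconditionally from the event handlers.
 */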
static int __cam_isp_ctx_notify_trigger_util(
	int trigger_type, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_trigger_notify notify;

	/* Trigger type not supported, return */
	if (!(ctx_isp->subscribe_event & trigger_type)) {
		CAM_DBG(CAM_ISP,
			"%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id);
		return 0;
	}

	/* Skip CRM notify when recovery is in progress */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_DBG(CAM_ISP,
			"Internal recovery in progress skip notifying %s trigger point in ctx: %u on link: 0x%x",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx->ctx_id, ctx->link_hdl);
		return 0;
	}

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.frame_id = ctx_isp->frame_id;
	notify.trigger = trigger_type;
	notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
	notify.trigger_id = ctx_isp->trigger_id;

	CAM_DBG(CAM_ISP,
		"Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld",
		__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
		ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
		ctx_isp->req_info.last_bufdone_req_id);

	rc = ctx->ctx_crm_intf->notify_trigger(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d",
			__cam_isp_ctx_crm_trigger_point_to_string(trigger_type),
			ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl,
			ctx_isp->req_info.last_bufdone_req_id, rc);

	return rc;
}
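
/*
 * Send an asynchronous error event to userspace through the V4L2
 * event queue via the request manager. Unlike the CRM notification
 * below, this path reports the failure directly to the client that
 * owns the session.
 */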
static int __cam_isp_ctx_notify_v4l2_error_event(
	uint32_t error_type, uint32_t error_code,
	uint64_t error_request_id, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx->session_hdl;
	req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
	req_msg.u.err_msg.error_type = error_type;
	req_msg.u.err_msg.link_hdl = ctx->link_hdl;
	req_msg.u.err_msg.request_id = error_request_id;
	req_msg.u.err_msg.resource_size = 0x0;
	req_msg.u.err_msg.error_code = error_code;

	rc = cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_ERROR,
		V4L_EVENT_CAM_REQ_MGR_EVENT);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x",
			error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);
	else
		CAM_DBG(CAM_ISP,
			"v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x notified successfully",
			error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
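
/*
 * Report a device error to CRM. Bubble errors (CRM_KMD_ERR_BUBBLE)
 * are part of the expected recovery flow and are logged as warnings;
 * anything else is treated as fatal and logged as an error before
 * CRM is asked to handle it.
 */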
static int __cam_isp_ctx_notify_error_util(
	uint32_t trigger_type, enum cam_req_mgr_device_error error,
	uint64_t req_id, struct cam_isp_context *ctx_isp)
{
	int rc = -EINVAL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_req_mgr_error_notify notify;

	notify.link_hdl = ctx->link_hdl;
	notify.dev_hdl = ctx->dev_hdl;
	notify.req_id = req_id;
	notify.error = error;
	notify.trigger = trigger_type;
	notify.frame_id = ctx_isp->frame_id;
	notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;

	if (error == CRM_KMD_ERR_BUBBLE)
		CAM_WARN(CAM_ISP,
			"Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x",
			req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);
	else
		CAM_ERR(CAM_ISP,
			"Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x",
			error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl);

	rc = ctx->ctx_crm_intf->notify_err(&notify);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify error: %u for req: %llu on ctx: %u in link: 0x%x",
			error, req_id, ctx->ctx_id, ctx->link_hdl);

	return rc;
}
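
/*
 * Ask the HW manager to collect a register dump of type @cmd for this
 * context's hardware, typically from error-handling paths; the actual
 * dumping happens inside the hw_mgr hw_cmd() implementation.
 */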
static int __cam_isp_ctx_trigger_reg_dump(
	enum cam_hw_mgr_command cmd,
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = cmd;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d",
			ctx->ctx_id, ctx->link_hdl, rc);
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"Reg dump type: %u successful in ctx: %u on link: 0x%x",
		cmd, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}
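
/*
 * Request that CRM pause the SOF watchdog timer for this link
 * (timer.state == false), so the watchdog does not fire while frames
 * are legitimately absent.
 */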
static int __cam_isp_ctx_pause_crm_timer(
	struct cam_context *ctx)
{
	int rc = -EINVAL;
	struct cam_req_mgr_timer_notify timer;

	if (!ctx || !ctx->ctx_crm_intf)
		goto end;

	timer.link_hdl = ctx->link_hdl;
	timer.dev_hdl = ctx->dev_hdl;
	timer.state = false;
	rc = ctx->ctx_crm_intf->notify_timer(&timer);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success",
		ctx->ctx_id, ctx->link_hdl);
end:
	return rc;
}
static inline void __cam_isp_ctx_update_sof_ts_util(
	struct cam_isp_hw_sof_event_data *sof_event_data,
	struct cam_isp_context *ctx_isp)
{
	/* Delayed update, skip if ts is already updated */
	if (ctx_isp->sof_timestamp_val == sof_event_data->timestamp)
		return;

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
}
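/*
 * Walk every command buffer configured for the request, validate its
 * offset/length against the mapped buffer, and either dump it through CDM
 * into the caller-provided buffer (@dump_to_buff true, honoring @offset and
 * @buf_len) or print it to the log.
 */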
static int cam_isp_ctx_dump_req(
	struct cam_isp_ctx_req *req_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset,
	bool dump_to_buff)
{
	int i, rc = 0;
	size_t len = 0;
	uint32_t *buf_addr;
	uint32_t *buf_start, *buf_end;
	size_t remain_len = 0;
	struct cam_cdm_cmd_buf_dump_info dump_info;

	for (i = 0; i < req_isp->num_cfg; i++) {
		rc = cam_packet_util_get_cmd_mem_addr(
			req_isp->cfg[i].handle, &buf_addr, &len);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Failed to get_cmd_mem_addr, rc=%d",
				rc);
		} else {
			if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
				CAM_ERR(CAM_ISP,
					"Invalid offset exp %u actual %u",
					req_isp->cfg[i].offset, (uint32_t)len);
				return -EINVAL;
			}
			remain_len = len - req_isp->cfg[i].offset;

			if (req_isp->cfg[i].len >
				((uint32_t)remain_len)) {
				CAM_ERR(CAM_ISP,
					"Invalid len exp %u remain_len %u",
					req_isp->cfg[i].len,
					(uint32_t)remain_len);
				return -EINVAL;
			}

			buf_start = (uint32_t *)((uint8_t *) buf_addr +
				req_isp->cfg[i].offset);
			buf_end = (uint32_t *)((uint8_t *) buf_start +
				req_isp->cfg[i].len - 1);

			if (dump_to_buff) {
				if (!cpu_addr || !offset || !buf_len) {
					CAM_ERR(CAM_ISP, "Invalid args");
					break;
				}
				dump_info.src_start = buf_start;
				dump_info.src_end = buf_end;
				dump_info.dst_start = cpu_addr;
				dump_info.dst_offset = *offset;
				dump_info.dst_max_size = buf_len;
				rc = cam_cdm_util_dump_cmd_bufs_v2(
					&dump_info);
				*offset = dump_info.dst_offset;
				if (rc)
					return rc;
			} else
				cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
		}
	}
	return rc;
}
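/*
 * Insert a request into the pending list sorted by request id. Entries with
 * a larger id are temporarily moved aside, the new request is appended, and
 * the moved entries are re-appended behind it; a matching id only logs a
 * warning since duplicates are not expected from CRM.
 */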
static int __cam_isp_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req, bool lock)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;
	struct cam_isp_context *ctx_isp;

	INIT_LIST_HEAD(&temp_list);
	if (lock)
		spin_lock_bh(&ctx->lock);

	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_ISP,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_SUBMIT, req);
	if (lock)
		spin_unlock_bh(&ctx->lock);
	return 0;
}
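/*
 * INIT packets are not queued as standalone requests: if an INIT request is
 * already pending, the new request's fence maps, hw update entries, reg dump
 * buffers and MUP settings are folded into it and the new request is
 * returned to the free list. An update packet arriving before any INIT is
 * rejected.
 */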
static int __cam_isp_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_isp_ctx_req *req_isp_old;
	struct cam_isp_ctx_req *req_isp_new;
	struct cam_isp_prepare_hw_update_data *req_update_old;
	struct cam_isp_prepare_hw_update_data *req_update_new;
	struct cam_isp_prepare_hw_update_data *hw_update_data;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		CAM_DBG(CAM_ISP, "INIT packet added req id= %lld",
			req->request_id);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp_old->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
			ctx->max_hw_update_entries) {
			CAM_WARN(CAM_ISP,
				"Can not merge INIT pkt num_cfgs = %d",
				(req_isp_old->num_cfg +
					req_isp_new->num_cfg));
			rc = -ENOMEM;
		}

		if (req_isp_old->num_fence_map_out != 0 ||
			req_isp_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_isp_old->fence_map_out,
				req_isp_new->fence_map_out,
				sizeof(req_isp_new->fence_map_out[0])*
				req_isp_new->num_fence_map_out);
			req_isp_old->num_fence_map_out =
				req_isp_new->num_fence_map_out;

			memcpy(req_isp_old->fence_map_in,
				req_isp_new->fence_map_in,
				sizeof(req_isp_new->fence_map_in[0])*
				req_isp_new->num_fence_map_in);
			req_isp_old->num_fence_map_in =
				req_isp_new->num_fence_map_in;

			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
				req_isp_new->cfg,
				sizeof(req_isp_new->cfg[0]) *
				req_isp_new->num_cfg);
			req_isp_old->num_cfg += req_isp_new->num_cfg;

			memcpy(&req_old->pf_data, &req->pf_data,
				sizeof(struct cam_hw_mgr_pf_request_info));

			if (req_isp_new->hw_update_data.num_reg_dump_buf) {
				req_update_new = &req_isp_new->hw_update_data;
				req_update_old = &req_isp_old->hw_update_data;
				memcpy(&req_update_old->reg_dump_buf_desc,
					&req_update_new->reg_dump_buf_desc,
					sizeof(struct cam_cmd_buf_desc) *
					req_update_new->num_reg_dump_buf);
				req_update_old->num_reg_dump_buf =
					req_update_new->num_reg_dump_buf;
			}

			/* Update HW update params for ePCR */
			hw_update_data = &req_isp_new->hw_update_data;
			req_isp_old->hw_update_data.frame_header_res_id =
				req_isp_new->hw_update_data.frame_header_res_id;
			req_isp_old->hw_update_data.frame_header_cpu_addr =
				hw_update_data->frame_header_cpu_addr;
			if (req_isp_new->hw_update_data.mup_en) {
				req_isp_old->hw_update_data.mup_en =
					req_isp_new->hw_update_data.mup_en;
				req_isp_old->hw_update_data.mup_val =
					req_isp_new->hw_update_data.mup_val;
				req_isp_old->hw_update_data.num_exp =
					req_isp_new->hw_update_data.num_exp;
			}
			req_old->request_id = req->request_id;
			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_ISP,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
static const char *__cam_isp_ife_sfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* IFE output ports */
	case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL";
	case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4";
	case CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16";
	case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP";
	case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD";
	case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF";
	case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0";
	case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1";
	case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2";
	case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF";
	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS";
	case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS";
	case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST";
	case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP";
	case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP";
	case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP";
	case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD";
	case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR";
	case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW";
	case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD";
	case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE";
	case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS";
	case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST";
	case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG";
	case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW";
	case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD";
	case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF";
	case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS";
	case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA";
	case CAM_ISP_IFE_OUT_RES_STATS_ALSC: return "IFE_STATS_ALSC";
	/* SFE output ports */
	case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0";
	case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1";
	case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2";
	case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3";
	case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4";
	case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0";
	case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1";
	case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2";
	case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0";
	case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1";
	case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2";
	case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR";
	case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW";
	case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1";
	case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2";
	case CAM_ISP_SFE_OUT_HDR_STATS: return "HDR_STATS";
	/* SFE input ports */
	case CAM_ISP_SFE_IN_RD_0: return "SFE_RD_0";
	case CAM_ISP_SFE_IN_RD_1: return "SFE_RD_1";
	case CAM_ISP_SFE_IN_RD_2: return "SFE_RD_2";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}
static const char *__cam_isp_tfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	/* TFE output ports */
	case CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL";
	case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP";
	case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF";
	case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0";
	case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1";
	case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST";
	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF";
	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS";
	case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4";
	case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16";
	case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI";
	/* Handle invalid type */
	default: return "Invalid_Resource_Type";
	}
}
static const char *__cam_isp_resource_handle_id_to_type(
	uint32_t device_type, uint32_t resource_handle)
{
	switch (device_type) {
	case CAM_IFE_DEVICE_TYPE:
		return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle);
	case CAM_TFE_DEVICE_TYPE:
		return __cam_isp_tfe_resource_handle_id_to_type(resource_handle);
	default:
		return "INVALID_DEV_TYPE";
	}
}
static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
{
	uint64_t ts = 0;

	if (!evt_data)
		return 0;

	switch (evt_id) {
	case CAM_ISP_HW_EVENT_ERROR:
		ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_SOF:
		ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_REG_UPDATE:
		ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EPOCH:
		ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EOF:
		ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_DONE:
	case CAM_ISP_HW_SECONDARY_EVENT:
		break;
	default:
		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
	}

	return ts;
}
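/*
 * Query the HW manager (CAM_ISP_HW_MGR_GET_SOF_TS) for the previous and
 * current SOF qtimer timestamps plus the boot-clock timestamp of the
 * current SOF. A previous timestamp that is not strictly older than the
 * current one is rejected as invalid.
 */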
static int __cam_isp_ctx_get_hw_timestamp(struct cam_context *ctx, uint64_t *prev_ts,
	uint64_t *curr_ts, uint64_t *boot_ts)
{
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	int rc;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	hw_cmd_args.u.internal_args = &isp_hw_cmd_args;

	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_SOF_TS;

	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
	if (rc)
		return rc;
	if (isp_hw_cmd_args.u.sof_ts.prev >= isp_hw_cmd_args.u.sof_ts.curr) {
		CAM_ERR(CAM_ISP,
			"ctx:%u previous timestamp is greater than or equal to current timestamp",
			ctx->ctx_id);
		return -EINVAL;
	}

	*prev_ts = isp_hw_cmd_args.u.sof_ts.prev;
	*curr_ts = isp_hw_cmd_args.u.sof_ts.curr;
	*boot_ts = isp_hw_cmd_args.u.sof_ts.boot;

	return 0;
}
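/*
 * Reconstruct the SOF timestamp after a missed SOF IRQ. By comparing the
 * last recorded timestamp against the prev/curr pair read back from HW, the
 * code decides whether HW has advanced one frame or two, adopts the correct
 * timestamp, and rebases the boot timestamp by the same delta.
 */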
static int __cam_isp_ctx_recover_sof_timestamp(struct cam_context *ctx, uint64_t request_id)
{
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	uint64_t prev_ts, curr_ts, boot_ts;
	uint64_t a, b, c;
	int rc;

	rc = __cam_isp_ctx_get_hw_timestamp(ctx, &prev_ts, &curr_ts, &boot_ts);
	if (rc) {
		CAM_ERR(CAM_ISP, "ctx:%u Failed to get timestamp from HW", ctx->ctx_id);
		return rc;
	}

	/*
	 * If the last received SOF was for frame A and we have missed the SOF for frame B,
	 * then we need to find out if the hardware is at frame B or C.
	 *   +-----+-----+-----+
	 *   |  A  |  B  |  C  |
	 *   +-----+-----+-----+
	 */
	a = ctx_isp->sof_timestamp_val;
	if (a == prev_ts) {
		/* Hardware is at frame B */
		b = curr_ts;
		CAM_DBG(CAM_ISP, "ctx:%u recovered timestamp (last:0x%llx, curr:0x%llx) req: %llu",
			ctx->ctx_id, a, b, request_id);
	} else if (a < prev_ts) {
		/* Hardware is at frame C */
		b = prev_ts;
		c = curr_ts;

		CAM_DBG(CAM_ISP,
			"ctx:%u recovered timestamp (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
			ctx->ctx_id, a, b, c, request_id);
	} else {
		/* Hardware is at frame A (which we supposedly missed) */
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx:%u erroneous call to SOF recovery (last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu",
			ctx->ctx_id, a, prev_ts, curr_ts, request_id);
		return 0;
	}

	ctx_isp->boot_timestamp = boot_ts + (b - curr_ts);
	ctx_isp->sof_timestamp_val = b;
	ctx_isp->frame_id++;
	return 0;
}
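/*
 * Publish the boot-clock SOF timestamp for a request to userspace as a
 * V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS event.
 */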
static void __cam_isp_ctx_send_sof_boot_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->boot_timestamp, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
		V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the boot time for req id:%lld",
			request_id);
}
static void __cam_isp_ctx_send_unified_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg_v2.request_id = request_id;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] =
		(request_id == 0) ? 0 : ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d boot time stamp:0x%llx",
		ctx_isp->base->link_hdl, request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, ctx_isp->base->ctx_id,
		ctx_isp->boot_timestamp);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof and boot time for req id:%lld",
			request_id);
}
static void __cam_isp_ctx_send_sof_timestamp_frame_header(
	struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
	uint64_t request_id, uint32_t sof_event_status)
{
	uint32_t *time32 = NULL;
	uint64_t timestamp = 0;
	struct cam_req_mgr_message req_msg;

	time32 = frame_header_cpu_addr;
	timestamp = (uint64_t) time32[1];
	timestamp = timestamp << 24;
	timestamp |= (uint64_t)(time32[0] >> 8);
	timestamp = mul_u64_u32_div(timestamp,
		CAM_IFE_QTIMER_MUL_FACTOR,
		CAM_IFE_QTIMER_DIV_FACTOR);

	ctx_isp->sof_timestamp_val = timestamp;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);
}
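/*
 * Top-level SOF notification path. Attempts timestamp recovery if no new
 * frame was observed since the last report, then sends either the unified
 * timestamp event (if subscribed and frame-header timestamps are unused),
 * or the legacy qtimer SOF event followed by the boot timestamp event.
 */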
static void __cam_isp_ctx_send_sof_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx_isp->reported_frame_id == ctx_isp->frame_id) {
		if (__cam_isp_ctx_recover_sof_timestamp(ctx_isp->base, request_id))
			CAM_WARN(CAM_ISP, "Missed SOF. Unable to recover SOF timestamp.");
	}

	if (request_id == 0 && (ctx_isp->reported_frame_id == ctx_isp->frame_id)) {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Missed SOF Recovery for invalid req, Skip notification to userspace Ctx: %u frame_id %llu",
			ctx->ctx_id, ctx_isp->frame_id);
		return;
	}

	ctx_isp->reported_frame_id = ctx_isp->frame_id;

	if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS))
		&& !ctx_isp->use_frame_header_ts) {
		__cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id);
		return;
	}

	if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
		goto end;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);

end:
	__cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
		request_id, sof_event_status);
}
static void __cam_isp_ctx_handle_buf_done_fail_log(
	uint64_t request_id, struct cam_isp_ctx_req *req_isp,
	uint32_t isp_device_type)
{
	int i;
	const char *handle_type;

	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP,
			"Num resources exceed MAX %d >= %d",
			req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
		return;
	}

	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
		request_id, req_isp->num_fence_map_out, req_isp->num_acked,
		req_isp->bubble_report, req_isp->bubble_detected);
	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Resource Handles that fail to generate buf_done in prev frame");
	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		if (req_isp->fence_map_out[i].sync_id != -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				isp_device_type, req_isp->fence_map_out[i].resource_handle);
			trace_cam_log_event("Buf_done Congestion",
				handle_type, request_id, req_isp->fence_map_out[i].sync_id);
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
				handle_type,
				req_isp->fence_map_out[i].resource_handle,
				req_isp->fence_map_out[i].sync_id);
		}
	}
}
static void __cam_isp_context_reset_internal_recovery_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->process_bubble, 0);
	ctx_isp->recovery_req_id = 0;
	ctx_isp->aeb_error_cnt = 0;
	ctx_isp->bubble_frame_cnt = 0;
}
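/*
 * Attempt bubble-based internal recovery for the saved recovery_req_id.
 * The errored request is looked up first in the wait list, then in the
 * pending list; on a successful CRM bubble notification the context moves
 * to the BUBBLE substate, otherwise recovery state is reset to normal.
 */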
static int __cam_isp_context_try_internal_recovery(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	/*
	 * Start with the wait list; if recovery is still set, the errored
	 * request has not been moved to the pending list yet and its buf
	 * done has not occurred - recover from here.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;

		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/*
	 * If not in the wait list, the only other possibility is that the
	 * request is in the pending list. On error detection, bubble detect
	 * is set assuming a new frame arrives after detection; on its reg
	 * update the request moves to the active list and finishes with its
	 * buf dones.
	 */
	if (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;

		if (req->request_id == ctx_isp->recovery_req_id) {
			rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
				CRM_KMD_ERR_BUBBLE, ctx_isp->recovery_req_id, ctx_isp);
			if (rc) {
				/* Unable to do bubble recovery, reset back to normal */
				CAM_WARN(CAM_ISP,
					"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				__cam_isp_context_reset_internal_recovery_params(ctx_isp);
				req_isp->bubble_detected = false;
				goto end;
			}

			ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
			CAM_INFO(CAM_ISP,
				"Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			goto end;
		}
	}

	/* If request is not found in either of the lists skip recovery */
	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

end:
	return rc;
}
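/*
 * Final accounting once all fences of a request are acked: requests in
 * bubble with report enabled are recycled to the pending list for re-apply
 * (or force-signalled as errors if already flushed), everything else is
 * returned to the free list, and internal recovery is kicked if armed and
 * the active list has drained.
 */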
static int __cam_isp_ctx_handle_buf_done_for_req_list(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0, i;
	uint64_t buf_done_req_id;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	ctx_isp->active_req_cnt--;
	buf_done_req_id = req->request_id;

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
		req_isp->cdm_reset_before_apply = false;
		ctx_isp->bubble_frame_cnt = 0;

		if (buf_done_req_id <= ctx->last_flush_req) {
			for (i = 0; i < req_isp->num_fence_map_out; i++)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			list_add_tail(&req->list, &ctx->free_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
				buf_done_req_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
			ctx_isp->last_bufdone_err_apply_req_id = 0;
		} else {
			list_add(&req->list, &ctx->pending_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
				req->request_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
		}
	} else {
		if (!ctx_isp->use_frame_header_ts) {
			if (ctx_isp->reported_req_id < buf_done_req_id) {
				ctx_isp->reported_req_id = buf_done_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					buf_done_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		/*
		 * Only update the process_bubble and bubble_frame_cnt
		 * when bubble is detected on this req, in case the other
		 * request is processing bubble.
		 */
		if (req_isp->bubble_detected) {
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			req_isp->bubble_detected = false;
		}

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
		ctx_isp->last_bufdone_err_apply_req_id = 0;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt)
		__cam_isp_context_try_internal_recovery(ctx_isp);

	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_BUFDONE, req);
	return rc;
}
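/*
 * Match each done resource handle against the request's out-fence map and
 * signal fences accordingly (success, or error when in bubble without
 * report). Handles that do not match are collected into @done_next_req so
 * the caller can retry them against the next active request.
 */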
static int __cam_isp_ctx_handle_buf_done_for_request(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	struct cam_isp_hw_done_event_data *done_next_req)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	done_next_req->num_handles = 0;
	done_next_req->timestamp = done->timestamp;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in current request, it could be
			 * belonging to next request, this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_WARN(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					done->resource_handle[i]),
				req->request_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type =
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type,
					req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%llu ctx_id:%u bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * Increment the ack number here, and queue the
			 * request back to pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d for req %lld, ctx %u",
				bubble_state,
				req_isp->bubble_report,
				req->request_id,
				ctx->ctx_id);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}
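/*
 * Flush acks that were previously deferred for @req. In bubble handling the
 * acks are merely counted (fences are recycled with the request); otherwise
 * each deferred fence is signalled now with the given status/cause.
 */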
static int __cam_isp_handle_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	bool bubble_handling,
	uint32_t status, uint32_t event_cause)
{
	int i, j;
	int rc = 0;
	struct cam_isp_ctx_req *req_isp =
		(struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, bubble_handling);

	for (i = 0; i < req_isp->num_deferred_acks; i++) {
		j = req_isp->deferred_fence_map_index[i];

		CAM_DBG(CAM_ISP,
			"ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
			ctx->ctx_id, status, event_cause,
			req->request_id,
			req_isp->fence_map_out[j].resource_handle,
			req_isp->fence_map_out[j].sync_id);

		if (req_isp->fence_map_out[j].sync_id == -1) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
				ctx->ctx_id, req->request_id, j,
				req_isp->fence_map_out[j].resource_handle);
			continue;
		}

		if (!bubble_handling) {
			CAM_WARN(CAM_ISP,
				"Unexpected Buf done for res=0x%x on ctx[%d] for Req %llu, status=%d, possible bh delays",
				req_isp->fence_map_out[j].resource_handle, ctx->ctx_id,
				req->request_id, status);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				status, event_cause);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
					ctx->ctx_id, req->request_id,
					req_isp->fence_map_out[j].sync_id,
					status, rc);
			} else {
				req_isp->num_acked++;
				req_isp->fence_map_out[j].sync_id = -1;
			}
		} else {
			req_isp->num_acked++;
		}
	}

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handled %d deferred buf_dones num_acked=%d, num_fence_map_out=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, req_isp->num_fence_map_out);

	req_isp->num_deferred_acks = 0;

	return rc;
}
static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	if (req_isp->num_deferred_acks)
		rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req,
			req_isp->bubble_report,
			CAM_SYNC_STATE_SIGNALED_ERROR,
			CAM_SYNC_ISP_EVENT_BUBBLE);

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked == req_isp->num_fence_map_out)
		rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);

	return rc;
}
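/*
 * Variant of buf done handling that can additionally verify the last
 * consumed address against the fence map entry (@verify_consumed_addr) and
 * can defer fence signalling (@defer_buf_done) for requests that may still
 * enter bubble. Unmatched handles are funneled into the deferred buf done
 * checker.
 */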
static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	bool verify_consumed_addr,
	bool defer_buf_done)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;
	uint32_t cmp_addr = 0;
	struct cam_isp_hw_done_event_data unhandled_done = {0};

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	if (done->num_handles > CAM_NUM_OUT_PER_COMP_IRQ_MAX) {
		CAM_ERR(CAM_ISP, "ctx: %u req: %llu num_handles: %u is more than %u",
			ctx->ctx_id, req->request_id,
			done->num_handles, CAM_NUM_OUT_PER_COMP_IRQ_MAX);
		return -EINVAL;
	}

	unhandled_done.timestamp = done->timestamp;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
				req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if (verify_consumed_addr && (done->last_consumed_addr[i] != cmp_addr))
				continue;

			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in current request, it could be
			 * belonging to next request, this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_DBG(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type, done->resource_handle[i]),
				req->request_id);

			unhandled_done.resource_handle[unhandled_done.num_handles] =
				done->resource_handle[i];
			unhandled_done.last_consumed_addr[unhandled_done.num_handles] =
				done->last_consumed_addr[i];
			unhandled_done.num_handles++;
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			handle_type = __cam_isp_resource_handle_id_to_type(
				ctx_isp->isp_device_type,
				req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);
			continue;
		}

		/* Get buf handles from packet and retrieve them from presil framework */
		if (cam_presil_mode_enabled()) {
			rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet,
				ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed to retrieve image buffers req_id:%llu ctx_id:%u bubble detected:%d rc:%d",
					req->request_id, ctx->ctx_id, req_isp->bubble_detected, rc);
				return rc;
			}
		}

		if (defer_buf_done) {
			uint32_t deferred_indx = req_isp->num_deferred_acks;

			/*
			 * If we are handling this BUF_DONE event for a request
			 * that is still in wait_list, do not signal now,
			 * instead mark it as done and handle it later -
			 * if this request is going into BUBBLE state later
			 * it will automatically be re-applied. If this is not
			 * going into BUBBLE, signal fences later.
			 * Note - we will come here only if the last consumed
			 * address matches with this ports buffer.
			 */
			req_isp->deferred_fence_map_index[deferred_indx] = j;
			req_isp->num_deferred_acks++;
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
				ctx->ctx_id, req->request_id, bubble_state,
				req_isp->bubble_report);
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
				ctx->ctx_id, req_isp->num_deferred_acks, j,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);
			continue;
		} else if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync = %u for req = %llu failed with rc = %d",
					req_isp->fence_map_out[j].sync_id, req->request_id, rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					false,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
			/* Reset fence */
			req_isp->fence_map_out[j].sync_id = -1;
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * Increment the ack number here, and queue the
			 * request back to pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d for req %lld, ctx %u",
				bubble_state,
				req_isp->bubble_report,
				req->request_id,
				ctx->ctx_id);
			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					true,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);

			if (req_isp->num_acked == req_isp->num_fence_map_out) {
				rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
				if (rc)
					CAM_ERR(CAM_ISP,
						"Error in buf done for req = %llu with rc = %d",
						req->request_id, rc);
				return rc;
			}
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc)
			req_isp->num_acked++;

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if ((unhandled_done.num_handles > 0) && (!defer_buf_done))
		__cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, &unhandled_done, bubble_state);

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}
static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_done_event_data done_next_req;

	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
		bubble_state, &done_next_req);

	if (done_next_req.num_handles) {
		struct cam_isp_hw_done_event_data unhandled_res;
		struct cam_ctx_request *next_req = list_last_entry(
			&ctx->active_req_list, struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id) {
			/*
			 * Few resource handles are already signalled in the
			 * current request, lets check if there is another
			 * request waiting for these resources. This can
			 * happen if handling some of next request's buf done
			 * events are happening first before handling current
			 * request's remaining buf dones due to IRQ scheduling.
			 * Lets check only one more request as we will have
			 * maximum of 2 requests in active_list at any time.
			 */
			CAM_WARN(CAM_ISP,
				"Unhandled buf done resources for req %lld, trying next request %lld in active_list",
				req->request_id, next_req->request_id);

			__cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
				next_req, &done_next_req,
				bubble_state, &unhandled_res);

			if (unhandled_res.num_handles == 0)
				CAM_INFO(CAM_ISP,
					"BUF Done event handled for next request %lld",
					next_req->request_id);
			else
				CAM_ERR(CAM_ISP,
					"BUF Done not handled for next request %lld",
					next_req->request_id);
		} else {
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
		}
	}

	return rc;
}
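/*
 * Check whether any handle in @done (matched by resource and last consumed
 * address) belongs to @req. A non-zero match count indicates the buf done
 * was delayed by IRQ scheduling and actually targets this newer request.
 */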
static void __cam_isp_ctx_buf_done_match_req(
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int i, j;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;
	uint32_t cmp_addr = 0;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE(
				req_isp->fence_map_out[j].image_buf_addr[0]) :
				req_isp->fence_map_out[j].image_buf_addr[0];
			if ((done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle) &&
				(done->last_consumed_addr[i] == cmp_addr)) {
				match_count++;
				break;
			}
		}
	}

	if (match_count > 0)
		*irq_delay_detected = true;
	else
		*irq_delay_detected = false;

	CAM_DBG(CAM_ISP,
		"buf done num handles %d match count %d for next req:%lld",
		done->num_handles, match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}
static void __cam_isp_ctx_try_buf_done_process_for_active_request(
	uint32_t deferred_ack_start_idx, struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *deferred_req)
{
	int i, j, deferred_map_idx, rc;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *curr_active_req;
	struct cam_isp_ctx_req *curr_active_isp_req;
	struct cam_isp_ctx_req *deferred_isp_req;

	if (list_empty(&ctx->active_req_list))
		return;

	curr_active_req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);
	curr_active_isp_req = (struct cam_isp_ctx_req *)curr_active_req->req_priv;
	deferred_isp_req = (struct cam_isp_ctx_req *)deferred_req->req_priv;

	/* Check from newly updated deferred acks */
	for (i = deferred_ack_start_idx; i < deferred_isp_req->num_deferred_acks; i++) {
		deferred_map_idx = deferred_isp_req->deferred_fence_map_index[i];

		for (j = 0; j < curr_active_isp_req->num_fence_map_out; j++) {
			/* resource needs to match */
			if (curr_active_isp_req->fence_map_out[j].resource_handle !=
				deferred_isp_req->fence_map_out[deferred_map_idx].resource_handle)
				continue;

			/* Check if fence is valid */
			if (curr_active_isp_req->fence_map_out[j].sync_id == -1)
				break;

			CAM_WARN(CAM_ISP,
				"Processing delayed buf done req: %llu bubble_detected: %s res: 0x%x fd: 0x%x, ctx: %u [deferred req: %llu last applied: %llu]",
				curr_active_req->request_id,
				CAM_BOOL_TO_YESNO(curr_active_isp_req->bubble_detected),
				curr_active_isp_req->fence_map_out[j].resource_handle,
				curr_active_isp_req->fence_map_out[j].sync_id, ctx->ctx_id,
				deferred_req->request_id, ctx_isp->last_applied_req_id);

			/* Signal only if bubble is not detected for this request */
			if (!curr_active_isp_req->bubble_detected) {
				rc = cam_sync_signal(curr_active_isp_req->fence_map_out[j].sync_id,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
				if (rc)
					CAM_ERR(CAM_ISP,
						"Sync: %d for req: %llu failed with rc: %d",
						curr_active_isp_req->fence_map_out[j].sync_id,
						curr_active_req->request_id, rc);

				curr_active_isp_req->fence_map_out[j].sync_id = -1;
			}

			curr_active_isp_req->num_acked++;
			break;
		}
	}
}
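/*
 * Buf done arrived with no active request: try to attribute it, with
 * consumed-address verification and deferred signalling, to the head of the
 * wait list or, failing that, the pending list (covering the case where reg
 * update and buf done beat the request's move to the wait list).
 */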
static int __cam_isp_ctx_check_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	uint32_t curr_num_deferred = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	bool req_in_pending_wait_list = false;

	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in wait list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify the consumed address for this request to make sure
		 * we are handling the buf_done for the correct buffer. Also
		 * defer the actual buf_done handling, i.e. do not signal the
		 * fence, as this request may go into Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	} else if (!list_empty(&ctx->pending_req_list)) {
		/*
		 * We have seen cases where the hw config is blocked for some
		 * reason, and we receive the reg upd and buf done before the
		 * req is added to the wait req list.
		 */
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in pending list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify the consumed address for this request to make sure
		 * we are handling the buf_done for the correct buffer. Also
		 * defer the actual buf_done handling, i.e. do not signal the
		 * fence, as this request may go into Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	}

	if (!req_in_pending_wait_list && (ctx_isp->last_applied_req_id !=
		ctx_isp->last_bufdone_err_apply_req_id)) {
		CAM_DBG(CAM_ISP,
			"Buf done with no active request bubble_state=%d last_applied_req_id:%lld",
			bubble_state, ctx_isp->last_applied_req_id);
		ctx_isp->last_bufdone_err_apply_req_id =
			ctx_isp->last_applied_req_id;
	}

	return rc;
}

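/*
 * Buf done handler for targets that report the consumed address: match
 * the done event against the first (and, when an IRQ delay is detected,
 * also the last) request on the active list by verifying the consumed
 * address, so a late buf done is never signaled against the wrong request.
 */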
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		return __cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, done, bubble_state);
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If irq delay isn't detected, then we need to verify
	 * the consumed address for current req, otherwise, we
	 * can't verify the consumed address.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
	 * since the reported buf done event may belong to current
	 * req, then we can't signal this event for next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}

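/* Dispatch buf done handling based on consumed-address support. */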
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}

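/*
 * Worker callback to apply the next pending request for offline/vfps
 * contexts: move the request from the pending to the wait list, issue
 * the hw config, and roll the state back if the config call fails.
 */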
static int __cam_isp_ctx_apply_pending_req(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg = {0};

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if (ctx_isp->vfps_aux_context) {
		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED)
			goto end;

		if (ctx_isp->active_req_cnt >= 1)
			goto end;
	} else {
		if ((ctx->state != CAM_CTX_ACTIVATED) ||
			(!atomic_read(&ctx_isp->rxd_epoch)) ||
			(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
			goto end;

		if (ctx_isp->active_req_cnt >= 2)
			goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than
	 * the CDM processing returns, so set the substate before
	 * applying the settings.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);

	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");

		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);

		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}

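/* Queue __cam_isp_ctx_apply_pending_req on the context workq. */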
static int __cam_isp_ctx_schedule_apply_req(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_pending_req;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}

static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	/*
	 * For offline it is not possible for an epoch to be generated
	 * without a RUP done; IRQ scheduling delays are the likely cause
	 * when it does happen.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Active list empty on ctx: %u - EPOCH serviced before RUP",
			ctx->ctx_id);
	} else {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}
	}

	__cam_isp_ctx_schedule_apply_req(ctx_isp);

	/*
	 * If there is no valid request, wait for the RUP shutter posted
	 * after buf done.
	 */
	if (request_id)
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}

static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated),
			ctx_isp->frame_id);

	return 0;
}

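/*
 * Reg update in applied state: the applied request has been latched by
 * the hw, so move it from the wait list to the active list (or straight
 * to the free list if it has no output fences) and advance to the EPOCH
 * substate.
 */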
static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and
	 * bubble-applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);

end:
	return rc;
}

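/*
 * Notify CRM of the frame trigger (invoked with epoch event data). When a
 * bubble is in progress, consult the last CDM-done request id to decide
 * whether the bubble request is still waiting on a delayed buf done or
 * must be moved back to the pending list for re-apply. The notification
 * is skipped when more than two requests are already active, letting the
 * state machine catch up with scheduling delay.
 */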
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the SOF signal. Note that due to scheduling
	 * delay more than two requests may already be in the active queue
	 * while we try to do the notification. In this case we need to
	 * skip the current notification, which helps the state machine
	 * catch up with the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		if (request_id != 0)
			ctx_isp->reported_req_id = request_id;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;

	/* notify reqmgr with eof signal */
	rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp);
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}

static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	/* First check if there is a valid request in active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing in active list, current request might have not moved
	 * from wait to active list. This could happen if REG_UPDATE to sw
	 * is coming immediately after SOF
	 */
	if ((request_id == 0) && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}

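/*
 * EPOCH in applied substate means the applied request did not receive a
 * reg update before the epoch point, i.e. the frame was likely missed:
 * flag the wait-list request as a bubble, optionally report the error to
 * CRM for recovery, and move the request to the active list so the buf
 * done path can finish it.
 */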
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If no wait req in epoch, this is an error case.
		 * The recovery is to go back to sof state
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;
	atomic_set(&ctx_isp->process_bubble, 1);

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	/*
	 * Always move the request to the active list; let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	/*
	 * Update the event record before the req pointer is advanced
	 * to another (possibly invalid) request.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Get the req again from active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if ((request_id == 0) && !list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}

static int __cam_isp_ctx_buf_done_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	if (list_empty(&ctx->active_req_list)) {
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	} else {
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}

static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

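/*
 * EPOCH in bubble-applied substate: the re-applied request again missed
 * its reg update, so mark it as a bubble once more and return to the
 * BUBBLE substate, reporting to CRM when bubble recovery is enabled.
 */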
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * This means we missed the reg upd ack, so we need to
	 * transition to BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx_isp->active_req_cnt <= 1)
			__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	}

	/*
	 * Always move the request to the active list; let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
		ctx_isp, req);

	/*
	 * The deferred buf done may have completed the request above; in
	 * that case pick the next pending request as the new bubble req.
	 */
	if (!req_isp->bubble_detected && !list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
			list);
		req_isp = (struct cam_isp_ctx_req *)req->req_priv;
		req_isp->bubble_detected = true;
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
	}

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}

static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}

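/*
 * Map an ISP hw error bitmask to the fence event cause, the req mgr error
 * code reported to userland, and the recovery type. When both full and
 * partial recovery bits end up set, full recovery wins.
 */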
static void __cam_isp_get_notification_evt_params(
	uint32_t hw_error, uint32_t *fence_evt_cause,
	uint32_t *req_mgr_err_code, uint32_t *recovery_type)
{
	uint32_t err_type, err_code = 0, recovery_type_temp;

	err_type = CAM_SYNC_ISP_EVENT_UNKNOWN;
	recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;

	if (hw_error & CAM_ISP_HW_ERROR_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_OUTPUT_FIFO_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_RECOVERY_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_P2I_ERROR) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_P2I_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_VIOLATION) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_VIOLATION;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) {
		err_code |= CAM_REQ_MGR_ISP_UNREPORTED_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR) {
		err_code |= CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING;
		err_type = CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW) {
		err_code |= CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME) {
		err_code |= CAM_REQ_MGR_CSID_UNBOUNDED_FRAME;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_FRAME_SIZE) {
		err_code |= CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_EOT) {
		err_code |= CAM_REQ_MGR_CSID_MISSING_EOT;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED) {
		err_code |= CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION;
		err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR;
		recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
	}

	if (recovery_type_temp == (CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY |
		CAM_REQ_MGR_ERROR_TYPE_RECOVERY))
		recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;

	if (!err_code)
		err_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR;

	*req_mgr_err_code = err_code;
	*fence_evt_cause = err_type;
	*recovery_type = recovery_type_temp;
}

static bool __cam_isp_ctx_request_can_reapply(
	struct cam_isp_ctx_req *req_isp)
{
	int i;

	for (i = 0; i < req_isp->num_fence_map_out; i++)
		if (req_isp->fence_map_out[i].sync_id == -1)
			return false;

	return true;
}

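/*
 * Recovery is only attempted when every output fence of every active
 * request is still unsignaled; otherwise a re-apply would replay buffers
 * that were already handed back. On success all active and wait requests
 * are re-queued, in order, onto the pending list.
 */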
static int __cam_isp_ctx_validate_for_req_reapply_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_ctx_request *req_temp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Check for req in active/wait lists */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"Active request list empty for ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);

		if (list_empty(&ctx->wait_req_list)) {
			CAM_WARN(CAM_ISP,
				"No active/wait req for ctx: %u on link: 0x%x",
				ctx->ctx_id, ctx->link_hdl);
			rc = -EINVAL;
			goto end;
		}
	}

	/* Validate that no fences for active requests have been signaled */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			/*
			 * If some fences of the active request are already
			 * signaled, we should not do recovery, for buffer
			 * and timestamp consistency.
			 */
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			if (!__cam_isp_ctx_request_can_reapply(req_isp)) {
				CAM_WARN(CAM_ISP,
					"Req: %llu in ctx:%u on link: 0x%x fence has partially signaled, cannot do recovery",
					req->request_id, ctx->ctx_id, ctx->link_hdl);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	/* Move active requests to pending list */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			ctx_isp->active_req_cnt--;
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move active req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}

	/* Move wait requests to pending list */
	if (!list_empty(&ctx->wait_req_list)) {
		list_for_each_entry_safe_reverse(req, req_temp, &ctx->wait_req_list, list) {
			list_del_init(&req->list);
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
			CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move wait req %llu to pending",
				ctx->ctx_id, ctx->link_hdl, req->request_id);
		}
	}

end:
	return rc;
}

static int __cam_isp_ctx_handle_recovery_req_util(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req_to_reapply = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	/*
	 * The only caller has just re-queued active/wait requests onto the
	 * pending list, but guard against an empty list regardless.
	 */
	if (list_empty(&ctx->pending_req_list)) {
		CAM_WARN(CAM_ISP,
			"No pending request to recover from on ctx: %u",
			ctx->ctx_id);
		return -EINVAL;
	}

	req_to_reapply = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp = (struct cam_isp_ctx_req *)req_to_reapply->req_priv;
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	ctx_isp->recovery_req_id = req_to_reapply->request_id;
	atomic_set(&ctx_isp->internal_recovery_set, 1);

	CAM_INFO(CAM_ISP, "Notify CRM to reapply req:%llu for ctx:%u link:0x%x",
		req_to_reapply->request_id, ctx->ctx_id, ctx->link_hdl);

	rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF,
		CRM_KMD_WARN_INTERNAL_RECOVERY, req_to_reapply->request_id,
		ctx_isp);
	if (rc) {
		/* Unable to notify CRM to do reapply; reset back to normal */
		CAM_WARN(CAM_ISP,
			"ctx:%u unable to notify CRM for req %llu",
			ctx->ctx_id, ctx_isp->recovery_req_id);
		ctx_isp->recovery_req_id = 0;
		atomic_set(&ctx_isp->internal_recovery_set, 0);
	}

	return rc;
}

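/*
 * Entry point for error-triggered internal recovery: validate that the
 * active/wait requests can be replayed, move them to the pending list,
 * and ask CRM to re-apply from the first pending request.
 */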
static int __cam_isp_ctx_trigger_error_req_reapply(
	uint32_t err_type, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;

	if ((err_type & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) &&
		(isp_ctx_debug.disable_internal_recovery_mask &
		CAM_ISP_CTX_DISABLE_RECOVERY_BUS_OVERFLOW))
		return -EINVAL;

	/*
	 * For errors that are recoverable within KMD, we try to do an
	 * internal hw stop and restart, and notify CRM to do a reapply
	 * with the help of the bubble control flow.
	 */
	rc = __cam_isp_ctx_validate_for_req_reapply_util(ctx_isp);
	if (rc)
		goto end;

	rc = __cam_isp_ctx_handle_recovery_req_util(ctx_isp);
	if (rc)
		goto end;

	CAM_DBG(CAM_ISP, "Triggered internal recovery for req:%llu ctx:%u on link 0x%x",
		ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);

end:
	return rc;
}

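/*
 * Fatal/recoverable hw error handler. Requests without bubble recovery
 * enabled have their fences signaled with an error and are freed; from
 * the first request with bubble recovery enabled onwards, everything is
 * moved back to the pending list and CRM is notified with either a bubble
 * or a fatal error, the latter also raising a v4l2 error event to
 * userland.
 */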
static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	enum cam_req_mgr_device_error error;
	uint32_t i = 0;
	bool found = false;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_to_report = NULL;
	struct cam_ctx_request *req_to_dump = NULL;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp = NULL;
	struct cam_isp_ctx_req *req_isp_to_report = NULL;
	uint64_t error_request_id;
	struct cam_hw_fence_map_entry *fence_map_out = NULL;
	uint32_t recovery_type, fence_evt_cause;
	uint32_t req_mgr_err_code;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_error_event_data *error_event_data =
		(struct cam_isp_hw_error_event_data *)evt_data;

	CAM_DBG(CAM_ISP, "Enter HW error_type = %d", error_event_data->error_type);

	if (error_event_data->try_internal_recovery) {
		rc = __cam_isp_ctx_trigger_error_req_reapply(error_event_data->error_type, ctx_isp);
		if (!rc)
			goto exit;
	}

	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	__cam_isp_get_notification_evt_params(error_event_data->error_type,
		&fence_evt_cause, &req_mgr_err_code, &recovery_type);

	/*
	 * The error is likely caused by the first request on the active
	 * list. If the active list is empty, check the wait list (maybe
	 * the error hit as soon as RUP and we handle the error before RUP).
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP,
			"handling error with no active request");
		if (list_empty(&ctx->wait_req_list)) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Error with no active/wait request");
			goto end;
		} else {
			req_to_dump = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
		}
	} else {
		req_to_dump = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
	}

	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;

	if (error_event_data->enable_req_dump)
		rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);

	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			ctx_isp->active_req_cnt--;
		} else {
			found = true;
			break;
		}
	}

	if (found)
		goto move_to_pending;

	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP, "signalled error for req %llu",
				req->request_id);
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				fence_map_out =
					&req_isp->fence_map_out[i];
				if (req_isp->fence_map_out[i].sync_id != -1) {
					CAM_DBG(CAM_ISP,
						"req %llu, Sync fd 0x%x ctx %u",
						req->request_id,
						req_isp->fence_map_out[i].sync_id,
						ctx->ctx_id);
					rc = cam_sync_signal(
						fence_map_out->sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						fence_evt_cause);
					fence_map_out->sync_id = -1;
				}
			}
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
		} else {
			found = true;
			break;
		}
	}

move_to_pending:
	/*
	 * If bubble recovery is enabled on any request we need to move that
	 * request and all the subsequent requests to the pending list.
	 * Note:
	 * We need to traverse the active list in reverse order and add
	 * to the head of the pending list.
	 * e.g. pending current state: 10, 11 | active current state: 8, 9
	 * intermittent for loop iteration - pending: 9, 10, 11 | active: 8
	 * final state - pending: 8, 9, 10, 11 | active: NULL
	 */
	if (found) {
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
			ctx_isp->active_req_cnt--;
		}
		list_for_each_entry_safe_reverse(req, req_temp,
			&ctx->wait_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			list_del_init(&req->list);
			list_add(&req->list, &ctx->pending_req_list);
		}
	}

end:
	do {
		if (list_empty(&ctx->pending_req_list)) {
			error_request_id = ctx_isp->last_applied_req_id;
			req_isp = NULL;
			break;
		}
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		error_request_id = ctx_isp->last_applied_req_id;

		if (req_isp->bubble_report) {
			req_to_report = req;
			req_isp_to_report = req_to_report->req_priv;
			break;
		}

		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					fence_evt_cause);
			req_isp->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	error = CRM_KMD_ERR_FATAL;
	if (req_isp_to_report && req_isp_to_report->bubble_report)
		if (error_event_data->recovery_enabled)
			error = CRM_KMD_ERR_BUBBLE;

	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error,
		error_request_id, ctx_isp);

	/*
	 * Need to send the error that occurred in KMD.
	 * This will help UMD to take the necessary action
	 * and to dump relevant info.
	 */
	if (error == CRM_KMD_ERR_FATAL)
		__cam_isp_ctx_notify_v4l2_error_event(recovery_type,
			req_mgr_err_code, error_request_id, ctx);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
	CAM_DBG(CAM_ISP, "Handling error done on ctx: %u", ctx->ctx_id);

exit:
	return rc;
}

static int __cam_isp_ctx_fs2_sof_in_sof_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (!(list_empty(&ctx->wait_req_list)))
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

end:
	return rc;
}

static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;
	struct cam_context *ctx = ctx_isp->base;
	int prev_active_req_cnt = 0;
	int curr_req_id = 0;
	struct cam_ctx_request *req;

	prev_active_req_cnt = ctx_isp->active_req_cnt;
	if (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		curr_req_id = req->request_id;
	}

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);

	if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
		if (list_empty(&ctx->wait_req_list) &&
			list_empty(&ctx->active_req_list)) {
			CAM_DBG(CAM_ISP, "No request, move to SOF");
			ctx_isp->substate_activated =
				CAM_ISP_CTX_ACTIVATED_SOF;
			if (ctx_isp->reported_req_id < curr_req_id) {
				ctx_isp->reported_req_id = curr_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					curr_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
	}

	return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}

static int __cam_isp_ctx_fs2_buf_done_in_applied(
	struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;

	rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
	return rc;
}

static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}

static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/*
	 * This function is only called directly from the applied and
	 * bubble-applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	if (req_isp->num_fence_map_out != 1)
		goto end;

	if (ctx_isp->active_req_cnt <= 2) {
		list_for_each_entry(req, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated));
end:
	if (req != NULL && !rc) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
	return rc;
}

static void __cam_isp_ctx_notify_aeb_error_for_sec_event(
	struct cam_isp_context *ctx_isp)
{
	struct cam_context *ctx = ctx_isp->base;

	if ((++ctx_isp->aeb_error_cnt) <= CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX) {
		CAM_WARN(CAM_ISP,
			"AEB slave RDI's current request's SOF seen after next req is applied for ctx: %u on link: 0x%x last_applied_req: %llu err_cnt: %u",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, ctx_isp->aeb_error_cnt);
		return;
	}

	CAM_ERR(CAM_ISP,
		"Fatal - AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height needs to be re-configured for ctx: %u on link: 0x%x err_cnt: %u",
		ctx->ctx_id, ctx->link_hdl, ctx_isp->aeb_error_cnt);

	/* Pause CRM timer */
	if (!ctx_isp->offline_context)
		__cam_isp_ctx_pause_crm_timer(ctx);

	/* Trigger reg dump */
	__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);

	/* Notify CRM on fatal error */
	__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL,
		ctx_isp->last_applied_req_id, ctx_isp);

	/* Notify userland on error */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY,
		CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx);

	/* Change state to HALT, stop further processing of HW events */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
}
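/*
 * Kick off internal (bubble based) recovery: pick the request to recover on
 * from the wait list (or the pending list for an out-of-sync frame drop),
 * flag recovery/bubble state, and either wait for outstanding buf dones or
 * report a bubble to CRM right away.
 */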
static int __cam_isp_ctx_trigger_internal_recovery(
	bool sync_frame_drop, struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	bool do_recovery = true;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If the wait list is empty and we encounter a "silent" frame
		 * drop, the settings applied on the previous frame did not
		 * take effect at the next frame boundary and are expected to
		 * latch a frame later - no need to recover. If it's an
		 * out-of-sync drop, use the pending request instead.
		 */
		if (sync_frame_drop && !list_empty(&ctx->pending_req_list))
			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);
		else
			do_recovery = false;
	}

	/* If both wait and pending list have no request to recover on */
	if (!do_recovery) {
		CAM_WARN(CAM_ISP,
			"No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id,
			ctx_isp->req_info.last_bufdone_req_id);
		goto end;
	}

	if (!req) {
		req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list);
		if (req->request_id != ctx_isp->last_applied_req_id)
			CAM_WARN(CAM_ISP,
				"Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx->ctx_id, ctx->link_hdl);
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	/*
	 * Treat this as a bubble; after recovery, re-start from the
	 * appropriate sub-state. This will block servicing any further apply
	 * calls from CRM.
	 */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	atomic_set(&ctx_isp->process_bubble, 1);
	ctx_isp->recovery_req_id = req->request_id;

	/* Wait for active requests to finish before issuing recovery */
	if (ctx_isp->active_req_cnt) {
		req_isp->bubble_detected = true;
		CAM_WARN(CAM_ISP,
			"Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x",
			ctx_isp->active_req_cnt, ctx_isp->recovery_req_id,
			ctx->ctx_id, ctx->link_hdl);
	} else {
		rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			ctx_isp->recovery_req_id, ctx_isp);
		if (rc) {
			/* Unable to do bubble recovery, reset back to normal */
			CAM_WARN(CAM_ISP,
				"Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x",
				ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl);
			__cam_isp_context_reset_internal_recovery_params(ctx_isp);
			goto end;
		}

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
	}
end:
	return rc;
}
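/*
 * Dispatcher for secondary (CSID) events - SOF, EPOCH and out-of-sync frame
 * drop - handled only for AEB usecases. A secondary SOF or EPOCH landing
 * while the context is still in an applied substate indicates a programming
 * delay, and triggers either the fatal AEB error path above or internal
 * recovery.
 */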
static int __cam_isp_ctx_handle_secondary_events(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	bool recover = false, sync_frame_drop = false;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_secondary_event_data *sec_evt_data =
		(struct cam_isp_hw_secondary_event_data *)evt_data;

	/* Current scheme handles these events only for custom AEB */
	if (!ctx_isp->aeb_enabled) {
		CAM_WARN(CAM_ISP,
			"Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u",
			ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type);
		goto end;
	}

	/*
	 * In case of custom AEB, ensure the first exposure frame has not
	 * moved forward with its settings without the second/third exposure
	 * frame coming in. Also track for bubble: in case of system delays
	 * it's possible for the IFE settings to not be written to HW on a
	 * given frame. If either scenario occurs, flag it as an error and
	 * recover.
	 */
	switch (sec_evt_data->evt_type) {
	case CAM_ISP_HW_SEC_EVENT_SOF:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF,
			ctx_isp->last_applied_req_id);

		/* Slave RDI's frame starting post IFE EPOCH - Fatal */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED))
			__cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp);
		else
			/* Reset error count */
			ctx_isp->aeb_error_cnt = 0;
		break;
	case CAM_ISP_HW_SEC_EVENT_EPOCH:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH,
			ctx_isp->last_applied_req_id);

		/*
		 * Master RDI frame dropped in CSID: due to a programming
		 * delay there is no RUP/AUP. On such occasions use the CSID
		 * CAMIF EPOCH for bubble detection, flag on detection and
		 * perform the necessary bubble recovery.
		 */
		if ((ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_APPLIED) ||
			(ctx_isp->substate_activated ==
			CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) {
			recover = true;
			CAM_WARN(CAM_ISP,
				"Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
				ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		}
		break;
	case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP:
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP,
			ctx_isp->last_applied_req_id);

		/* Avoid recovery loop if frame is dropped at stream on */
		if (!ctx_isp->frame_id) {
			CAM_ERR(CAM_ISP,
				"Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld",
				ctx->ctx_id, ctx->link_hdl,
				ctx_isp->frame_id, ctx_isp->last_applied_req_id);
			rc = -EPERM;
			break;
		}

		recover = true;
		sync_frame_drop = true;
		CAM_WARN(CAM_ISP,
			"Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....",
			ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id);
		break;
	default:
		break;
	}

	if (recover &&
		!(isp_ctx_debug.disable_internal_recovery_mask & CAM_ISP_CTX_DISABLE_RECOVERY_AEB))
		rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp);
end:
	return rc;
}
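/*
 * Per-substate IRQ jump table for the regular activated state machine.
 * Judging by the handlers wired in below, each irq_ops slot is indexed by
 * HW event type: error, SOF, reg update, epoch, EOF, buf done and secondary
 * event, in that order.
 */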
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_sof,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_sof,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
			__cam_isp_ctx_handle_secondary_events,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
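/*
 * IRQ jump table for the FS2 flavour of the state machine; same slot layout
 * as above, but with the FS2-specific SOF, reg update and buf done handlers
 * and no secondary event handling.
 */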
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_fs2_reg_upd_in_sof,
			__cam_isp_ctx_fs2_sof_in_sof_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_fs2_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_applied,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_epoch,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_fs2_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_epoch_bubble_state,
			__cam_isp_ctx_notify_sof_in_activated_state,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* Bubble Applied */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_epoch_in_bubble_applied,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
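/*
 * IRQ jump table for offline (non-realtime) contexts: SOF/EOF notifications
 * are largely not wired up, and the bubble substates do not apply.
 */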
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			NULL,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
	},
	/* Bubble Applied */
	{
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};
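/*
 * Forward a pending event injection request (apparently a debug/test
 * facility) to the common context layer and invalidate it once consumed.
 */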
static inline int cam_isp_context_apply_evt_injection(struct cam_context *ctx)
{
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_hw_inject_evt_param *evt_inject_params = &ctx_isp->evt_inject_params;
	struct cam_common_evt_inject_data inject_evt = {0};
	int rc;

	inject_evt.evt_params = evt_inject_params;
	rc = cam_context_apply_evt_injection(ctx, &inject_evt);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to apply event injection ctx_id: %u req_id: %u",
			ctx->ctx_id, evt_inject_params->req_id);

	evt_inject_params->is_valid = false;

	return rc;
}
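/*
 * Core apply path: validates the apply against bubble/recovery state and
 * the tip of the pending list, rejects it when two requests are already
 * active, then programs the HW update entries via hw_config and moves the
 * request to the wait list (or re-queues it and flags a bubble on
 * -EALREADY).
 */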
static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *active_req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_ctx_req *active_req_isp;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_config_args cfg = {0};

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	if (apply->re_apply)
		if (apply->request_id <= ctx_isp->last_applied_req_id) {
			CAM_INFO_RATE_LIMIT(CAM_ISP,
				"ctx_id:%d Trying to reapply the same request %llu again",
				ctx->ctx_id,
				apply->request_id);
			return 0;
		}

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d No available request for Apply id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * When the pipeline has an issue, requests can queue up in it. In
	 * that case, reject any additional request. The maximum number of
	 * requests allowed to be outstanding is 2.
	 */
	if (atomic_read(&ctx_isp->process_bubble)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
			ctx->ctx_id,
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	/*
	 * While the ISP is processing internal recovery, CRM may still apply
	 * a request to this ctx. In that case, reject the apply.
	 */
	if (atomic_read(&ctx_isp->internal_recovery_set)) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Processing recovery cannot apply Request Id %lld",
			ctx->ctx_id,
			apply->request_id);
		rc = -EAGAIN;
		goto end;
	}

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	/*
	 * Check whether the request id matches the tip of the pending list;
	 * if not, we are in the middle of error handling and must reject
	 * this apply.
	 */
	if (req->request_id != apply->request_id) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Invalid Request Id asking %llu existing %llu",
			ctx->ctx_id,
			apply->request_id, req->request_id);
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
		req->request_id,
		__cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
		ctx->ctx_id);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (ctx_isp->active_req_cnt >= 2) {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
			req->request_id,
			ctx_isp->active_req_cnt,
			ctx->ctx_id);

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->active_req_list))
			active_req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
		else
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"WARNING: should not happen (cnt = %d) but active_list empty",
				ctx_isp->active_req_cnt);
		spin_unlock_bh(&ctx->lock);

		if (active_req) {
			active_req_isp =
				(struct cam_isp_ctx_req *) active_req->req_priv;
			__cam_isp_ctx_handle_buf_done_fail_log(
				active_req->request_id, active_req_isp,
				ctx_isp->isp_device_type);
		}

		rc = -EFAULT;
		goto end;
	}

	req_isp->bubble_report = apply->report_if_bubble;

	/*
	 * Reset all buf done/bubble flags for the request being applied.
	 * If internal recovery has led to a re-apply of the same request,
	 * clear all stale entries.
	 */
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->cdm_reset_before_apply = false;
	req_isp->bubble_detected = false;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;
	cfg.reapply_type = req_isp->reapply_type;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	if ((ctx_isp->evt_inject_params.is_valid) &&
		(req->request_id == ctx_isp->evt_inject_params.req_id)) {
		rc = cam_isp_context_apply_evt_injection(ctx_isp->base);
		if (!rc)
			goto end;
	}

	atomic_set(&ctx_isp->apply_in_progress, 1);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		ctx_isp->last_applied_req_id = apply->request_id;
		list_del_init(&req->list);
		if (atomic_read(&ctx_isp->internal_recovery_set))
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, false);
		else
			list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
			__cam_isp_ctx_substate_val_to_type(next_state),
			ctx_isp->last_applied_req_id);
		spin_unlock_bh(&ctx->lock);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_APPLY, req);
	} else if (rc == -EALREADY) {
		spin_lock_bh(&ctx->lock);
		req_isp->bubble_detected = true;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		spin_unlock_bh(&ctx->lock);
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Cannot apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}
	atomic_set(&ctx_isp->apply_in_progress, 0);
end:
	return rc;
}
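/*
 * Thin per-substate wrappers around the core apply path: SOF and EPOCH
 * applies land the request in the APPLIED substate, while an apply during
 * BUBBLE moves it to BUBBLE_APPLIED.
 */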
static int __cam_isp_ctx_apply_req_in_sof(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_req_in_epoch(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_req_in_bubble(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static int __cam_isp_ctx_apply_default_req_settings(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *isp_ctx =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type =
		CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to apply default settings rc %d", rc);
	else
		CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);

	return rc;
}
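/*
 * Serializer callbacks for cam_common_user_dump_helper used by the dump
 * paths below: they write the request ids of a request list, a single
 * active request, per-request apply/dump timestamps, and basic stream
 * identifiers into the dump buffer.
 */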
static void *cam_isp_ctx_user_dump_req_list(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct list_head *head = NULL;
	uint64_t *addr;
	struct cam_ctx_request *req, *req_temp;

	head = (struct list_head *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	if (!list_empty(head)) {
		list_for_each_entry_safe(req, req_temp, head, list) {
			*addr++ = req->request_id;
		}
	}

	return addr;
}

static void *cam_isp_ctx_user_dump_active_requests(
	void *dump_struct, uint8_t *addr_ptr)
{
	uint64_t *addr;
	struct cam_ctx_request *req;

	req = (struct cam_ctx_request *)dump_struct;
	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;

	return addr;
}
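/*
 * Dump the request ids from all three request lists plus the out-fence
 * mapping of every active request, after bounds-checking the dump buffer.
 */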
static int __cam_isp_ctx_dump_req_info(
	struct cam_context *ctx,
	struct cam_ctx_request *req,
	struct cam_common_hw_dump_args *dump_args)
{
	int i, rc = 0;
	uint32_t min_len;
	size_t remain_len;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req_temp;

	if (!req || !ctx || !dump_args) {
		CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK",
			req, ctx, dump_args);
		return -EINVAL;
	}

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;

	if (dump_args->buf_len <= dump_args->offset) {
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			dump_args->buf_len, dump_args->offset);
		return -ENOSPC;
	}

	remain_len = dump_args->buf_len - dump_args->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
		req_isp->num_fence_map_out *
		sizeof(uint64_t));

	if (remain_len < min_len) {
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	/* Dump pending request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->pending_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_PENDING_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Pending request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump applied request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->wait_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_APPLIED_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Applied request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request list */
	rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list,
		&ctx->active_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_ACTIVE_REQUESTS:");
	if (rc) {
		CAM_ERR(CAM_ISP, "CAM_ISP_CONTEXT: Active request dump failed, rc: %d",
			rc);
		return rc;
	}

	/* Dump active request fences */
	if (!list_empty(&ctx->active_req_list)) {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *)req->req_priv;
			for (i = 0; i < req_isp->num_fence_map_out; i++) {
				rc = cam_common_user_dump_helper(dump_args,
					cam_isp_ctx_user_dump_active_requests,
					req, sizeof(uint64_t),
					"ISP_OUT_FENCE_REQUEST_ACTIVE.%s.%u.%d:",
					__cam_isp_ife_sfe_resource_handle_id_to_type(
						req_isp->fence_map_out[i].resource_handle),
					req_isp->fence_map_out[i].image_buf_addr[0],
					req_isp->fence_map_out[i].sync_id);
				if (rc) {
					CAM_ERR(CAM_ISP,
						"CAM_ISP_CONTEXT DUMP_REQ_INFO: Dump failed, rc: %d",
						rc);
					return rc;
				}
			}
		}
	}

	return rc;
}
static void *cam_isp_ctx_user_dump_timer(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp = NULL;
	uint64_t *addr;
	ktime_t cur_time;

	req = (struct cam_ctx_request *)dump_struct;
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	cur_time = ktime_get();

	addr = (uint64_t *)addr_ptr;

	*addr++ = req->request_id;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_sec;
	*addr++ = ktime_to_timespec64(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_nsec / NSEC_PER_USEC;
	*addr++ = ktime_to_timespec64(cur_time).tv_sec;
	*addr++ = ktime_to_timespec64(cur_time).tv_nsec / NSEC_PER_USEC;
	return addr;
}

static void *cam_isp_ctx_user_dump_stream_info(
	void *dump_struct, uint8_t *addr_ptr)
{
	struct cam_context *ctx = NULL;
	int32_t *addr;

	ctx = (struct cam_context *)dump_struct;
	addr = (int32_t *)addr_ptr;

	*addr++ = ctx->ctx_id;
	*addr++ = ctx->dev_hdl;
	*addr++ = ctx->link_hdl;
	return addr;
}
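/*
 * Top-level dump entry point invoked via CRM: locate the request in the
 * active/wait/pending lists, then emit timing info, stream info, the event
 * record, the state monitor array, request info and finally the HW
 * (CSID/VFE/SFE) dump into the user-provided buffer.
 */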
static int __cam_isp_ctx_dump_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_dump_info *dump_info)
{
	int rc = 0;
	bool dump_only_event_record = false;
	size_t buf_len;
	size_t remain_len;
	ktime_t cur_time;
	uint32_t min_len;
	uint64_t diff;
	uintptr_t cpu_addr;
	uint8_t req_type;
	struct cam_isp_context *ctx_isp;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_ctx_request *req_temp;
	struct cam_hw_dump_args ife_dump_args;
	struct cam_common_hw_dump_args dump_args;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
				dump_info->req_id);
			req_type = 'a';
			goto hw_dump;
		}
	}
	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
				dump_info->req_id);
			req_type = 'w';
			goto hw_dump;
		}
	}
	list_for_each_entry_safe(req, req_temp,
		&ctx->pending_req_list, list) {
		if (req->request_id == dump_info->req_id) {
			CAM_INFO(CAM_ISP, "isp dump pending list req: %lld",
				dump_info->req_id);
			req_type = 'p';
			goto hw_dump;
		}
	}
	goto end;
hw_dump:
	rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
		&cpu_addr, &buf_len);
	if (rc) {
		CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
			dump_info->buf_handle, rc);
		goto end;
	}
	if (buf_len <= dump_info->offset) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
			buf_len, dump_info->offset);
		return -ENOSPC;
	}

	remain_len = buf_len - dump_info->offset;
	min_len = sizeof(struct cam_isp_context_dump_header) +
		(CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

	if (remain_len < min_len) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	cur_time = ktime_get();
	diff = ktime_us_delta(
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY],
		cur_time);
	if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
		CAM_INFO(CAM_ISP, "req %lld found no error",
			req->request_id);
		dump_only_event_record = true;
	}

	dump_args.req_id = dump_info->req_id;
	dump_args.cpu_addr = cpu_addr;
	dump_args.buf_len = buf_len;
	dump_args.offset = dump_info->offset;
	dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;

	/* Dump time info */
	rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_timer,
		req, sizeof(uint64_t), "ISP_CTX_DUMP:.%c", req_type);
	if (rc) {
		CAM_ERR(CAM_ISP, "Time dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	/* Dump stream info */
	ctx->ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx->hw_mgr_intf->hw_dump) {
		/* Dump first part of stream info from isp context */
		rc = cam_common_user_dump_helper(&dump_args,
			cam_isp_ctx_user_dump_stream_info, ctx,
			sizeof(int32_t), "ISP_STREAM_INFO_FROM_CTX:");
		if (rc) {
			CAM_ERR(CAM_ISP, "ISP CTX stream info dump fail %lld, rc: %d",
				req->request_id, rc);
			goto end;
		}

		/* Dump second part of stream info from ife hw manager */
		hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
		isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_DUMP_STREAM_INFO;
		isp_hw_cmd_args.cmd_data = &dump_args;
		hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

		rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args);
		if (rc) {
			CAM_ERR(CAM_ISP, "IFE HW MGR stream info dump fail %lld, rc: %d",
				req->request_id, rc);
			goto end;
		}
		dump_info->offset = dump_args.offset;
	}

	/* Dump event record */
	rc = __cam_isp_ctx_dump_event_record(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Event record dump fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	dump_info->offset = dump_args.offset;

	if (dump_only_event_record) {
		goto end;
	}

	/* Dump state monitor array */
	rc = __cam_isp_ctx_user_dump_state_monitor_array(ctx_isp, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump event fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}

	/* Dump request info */
	rc = __cam_isp_ctx_dump_req_info(ctx, req, &dump_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Dump Req info fail %lld, rc: %d",
			req->request_id, rc);
		goto end;
	}
	spin_unlock_bh(&ctx->lock);

	/* Dump CSID, VFE, and SFE info */
	dump_info->offset = dump_args.offset;
	if (ctx->hw_mgr_intf->hw_dump) {
		ife_dump_args.offset = dump_args.offset;
		ife_dump_args.request_id = dump_info->req_id;
		ife_dump_args.buf_handle = dump_info->buf_handle;
		ife_dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_dump(
			ctx->hw_mgr_intf->hw_mgr_priv,
			&ife_dump_args);
		dump_info->offset = ife_dump_args.offset;
	}
	return rc;
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
static int __cam_isp_ctx_flush_req_in_flushed_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	CAM_INFO(CAM_ISP, "Flush (type %d) in flushed state req id %lld ctx_id:%d",
		flush_req->type, flush_req->req_id, ctx->ctx_id);
	if (flush_req->req_id > ctx->last_flush_req)
		ctx->last_flush_req = flush_req->req_id;
	return 0;
}
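/*
 * Flush helper shared by all flush paths: for CANCEL_REQ only the matching
 * request is moved to a local flush list, otherwise the whole list is;
 * every flushed out-fence is then signaled with CANCEL and the requests are
 * returned to the free list.
 */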
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
	int i, rc, tmp = 0;
	uint32_t cancel_req_id_found = 0;
	struct cam_ctx_request *req;
	struct cam_ctx_request *req_temp;
	struct cam_isp_ctx_req *req_isp;
	struct list_head flush_list;
	struct cam_isp_context *ctx_isp = NULL;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	INIT_LIST_HEAD(&flush_list);
	if (list_empty(req_list)) {
		CAM_DBG(CAM_ISP, "request list is empty");
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			CAM_INFO(CAM_ISP, "no request to cancel (last applied:%lld cancel:%lld)",
				ctx_isp->last_applied_req_id, flush_req->req_id);
			return -EINVAL;
		} else
			return 0;
	}

	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
		flush_req->type, flush_req->req_id);
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
			if (req->request_id != flush_req->req_id) {
				continue;
			} else {
				list_del_init(&req->list);
				list_add_tail(&req->list, &flush_list);
				cancel_req_id_found = 1;
				__cam_isp_ctx_update_state_monitor_array(
					ctx_isp,
					CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
					req->request_id);
				break;
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &flush_list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
	}

	if (list_empty(&flush_list)) {
		/*
		 * Maybe the req wasn't sent to the KMD, since the UMD
		 * already skips the req in the CSL layer.
		 */
		CAM_INFO(CAM_ISP,
			"flush list is empty, flush type %d for req %llu",
			flush_req->type, flush_req->req_id);
		return 0;
	}

	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			if (req_isp->fence_map_out[i].sync_id != -1) {
				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
					req->request_id,
					req_isp->fence_map_out[i].sync_id);
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_CANCEL,
					CAM_SYNC_ISP_EVENT_FLUSH);
				if (rc) {
					tmp = req_isp->fence_map_out[i].sync_id;
					CAM_ERR_RATE_LIMIT(CAM_ISP,
						"signal fence %d failed", tmp);
				}
				req_isp->fence_map_out[i].sync_id = -1;
			}
		}
		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
		req_isp->cdm_reset_before_apply = false;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	return 0;
}
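/*
 * Flush in the activated (top) state: always flushes the pending list; for
 * FLUSH_TYPE_ALL it additionally halts the substate, triggers a flush reg
 * dump, stops and resets the HW, pauses the CRM timer and flushes the wait
 * and active lists.
 */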
static int __cam_isp_ctx_flush_req_in_top_state(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_hw_reset_args reset_args;
	struct cam_req_mgr_timer_notify timer;

	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "Flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
	spin_unlock_bh(&ctx->lock);

	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
		if (ctx->state <= CAM_CTX_READY) {
			ctx->state = CAM_CTX_ACQUIRED;
			goto end;
		}

		spin_lock_bh(&ctx->lock);
		ctx->state = CAM_CTX_FLUSHED;
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
		spin_unlock_bh(&ctx->lock);

		CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
			flush_req->req_id, ctx->ctx_id);
		ctx->last_flush_req = flush_req->req_id;

		__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx);

		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
		stop_isp.stop_only = true;
		stop_isp.is_internal_stop = false;
		stop_args.args = (void *)&stop_isp;
		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
			&stop_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
				rc);

		CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
		CAM_DBG(CAM_ISP, "Flush wait and active lists");

		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
			timer.link_hdl = ctx->link_hdl;
			timer.dev_hdl = ctx->dev_hdl;
			timer.state = false;
			ctx->ctx_crm_intf->notify_timer(&timer);
		}

		spin_lock_bh(&ctx->lock);
		if (!list_empty(&ctx->wait_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
				flush_req);
		if (!list_empty(&ctx->active_req_list))
			rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
				flush_req);
		ctx_isp->active_req_cnt = 0;
		spin_unlock_bh(&ctx->lock);

		reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
			&reset_args);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);

		ctx_isp->init_received = false;
	}
end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	return rc;
}
static int __cam_isp_ctx_flush_req_in_ready(
	struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush_req)
{
	int rc = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);

	/* if nothing is in the pending req list, change state to acquired */
	if (list_empty(&ctx->pending_req_list))
		ctx->state = CAM_CTX_ACQUIRED;
	spin_unlock_bh(&ctx->lock);

	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
		ctx->state);
	return rc;
}
static struct cam_ctx_ops
	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_req_settings,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
static struct cam_ctx_ops
	cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_sof,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
		},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
		},
		.irq_ops = NULL,
	},
	/* Bubble Applied */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
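/*
 * SOF handler for RDI-only contexts in the SOF substate: notify CRM of the
 * trigger point, report the shutter for the first active request in case
 * its RUP was already processed, and stay in (or fall back to) the SOF
 * substate while requests are still active.
 */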
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	/*
	 * Notify reqmgr with the SOF signal. Note that due to scheduling
	 * delays we can run into a situation where two active requests are
	 * already in the active queue while we try to do the notification.
	 * In this case, skip the current notification. This helps the state
	 * machine to catch up on the delay.
	 */
	if (ctx_isp->active_req_cnt <= 2) {
		__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

		/*
		 * It's possible for the RUP done to be processed before the
		 * SOF; check the first active request's shutter here.
		 */
		if (!list_empty(&ctx->active_req_list)) {
			struct cam_ctx_request *req = NULL;

			req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
			}
		}
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot notify SOF to CRM");
	}

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return rc;
}
static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
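/*
 * SOF while a bubble-applied request is outstanding means the reg update
 * never arrived: report the previous SOF timestamp, mark the waiting
 * request as a bubble (optionally notifying CRM) and park it on the active
 * list for the buf done path to finish.
 */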
static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	uint64_t request_id = 0;

	/*
	 * SOF in bubble applied state means the reg update was not received.
	 * Before incrementing the frame id and overriding the timestamp
	 * value, send the previous SOF timestamp that was captured in the
	 * SOF-in-applied state.
	 */
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no pending req in epoch, this is an error case.
		 * The recovery is to go back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as an empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report) {
		__cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE,
			req->request_id, ctx_isp);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/* Change the state to bubble, as the reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
end:
	return 0;
}
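/*
 * SOF in the BUBBLE substate. If a bubble is being processed, use the last
 * CDM-done request id to decide between waiting for delayed buf dones and
 * re-queuing the request for a re-apply; otherwise signal every active
 * request as an error and recycle it to the free list.
 */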
static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t request_id = 0;
	uint64_t last_cdm_done_req = 0;
	int rc = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}

	__cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP, "No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			return -EINVAL;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %d",
				ctx_isp->frame_id,
				ctx_isp->sof_timestamp_val,
				ctx->ctx_id);
			goto end;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %d ctx_id: %d",
				last_cdm_done_req, ctx->ctx_id);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(ctx_isp, req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR,
						CAM_SYNC_ISP_EVENT_BUBBLE);
					__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
				goto end;
			} else {
				CAM_WARN(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
			goto end;
		}
	}

	/*
	 * Signal all active requests with error and move them all to the
	 * free list.
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
			req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++)
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		list_add_tail(&req->list, &ctx->free_req_list);
		ctx_isp->active_req_cnt--;
	}
end:
	/* Notify reqmgr with the SOF signal */
	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	/*
	 * It is an idle frame without any applied request id; send the
	 * request id as zero.
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/*
	 * Can't move the substate to SOF while processing a bubble, since
	 * the SOF substate can't receive REG_UPD or buf done events, and
	 * the processing of the bubble req would then never finish.
	 */
	if (!atomic_read(&ctx_isp->process_bubble))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	/* Format specifier was missing for the request id argument */
	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %llu",
		req->request_id);
	return 0;
}
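/*
 * Reg update in BUBBLE_APPLIED for RDI-only contexts: promote the waiting
 * request to the active list (reporting request id 0 for init packets),
 * notify the SOF trigger point and move to EPOCH; with an empty wait list,
 * fall back to the SOF substate after sending an idle-frame SOF event.
 */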
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	/* notify reqmgr with sof signal */
	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto error;
	}

	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	request_id =
		(req_isp->hw_update_data.packet_opcode_type ==
			CAM_ISP_PACKET_INIT_DEV) ? 0 : req->request_id;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_ISP,
			"move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
		/* if packet has buffers, set correct request id */
		request_id = req->request_id;
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active req %lld to free list(cnt=%d)",
			req->request_id, ctx_isp->active_req_cnt);
	}

	__cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp);

	if (request_id)
		ctx_isp->reported_req_id = request_id;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, req);
	return 0;

error:
	/* Send SOF event as idle frame */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_RUP, NULL);

	/*
	 * There is no request in the pending list, move the sub state machine
	 * to SOF sub state
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	return 0;
}
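
/*
 * Per-substate IRQ dispatch table for RDI-only sessions. Each row maps the
 * hardware event index (error, SOF, reg-update, EPOCH, EOF, buf-done, in
 * that assumed enum order) to its handler for one activated substate; NULL
 * means the event is ignored in that substate. The HW ERROR and HALT rows
 * are intentionally left empty.
 */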
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_rdi_only_activated_state_machine_irq
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			__cam_isp_ctx_reg_upd_in_sof,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_applied_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			NULL,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED, i.e. PRE_BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
			NULL,
			__cam_isp_ctx_notify_eof_in_activated_state,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},
	/* HW ERROR */
	{
	},
	/* HALT */
	{
	},
};
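
/*
 * CRM apply entry point for RDI-only sessions: a thin wrapper that
 * delegates to the common activated-state apply and asks it to land in
 * the APPLIED substate; only the logging around the call is RDI specific.
 */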
static int __cam_isp_ctx_rdi_only_apply_req_top_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	CAM_DBG(CAM_ISP, "current Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);
	CAM_DBG(CAM_ISP, "new Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	if (rc)
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Apply failed in Substate[%s], rc %d",
			ctx->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
static struct cam_ctx_ops
	cam_isp_ctx_rdi_only_activated_state_machine
	[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* PRE BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HW ERROR */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};
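
/*
 * FLUSH_DEV ioctl for offline contexts: translate the UMD flush command
 * into a CRM-style flush request and dispatch it by context state. For
 * CAM_FLUSH_TYPE_ALL the per-context workq is flushed as well (assumed
 * intent: in the original listing this flush sat after a switch whose
 * every case returned, so it could never run; it is folded into the
 * successful return path below).
 */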
static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (!ctx_isp->offline_context) {
		CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
		return -EINVAL;
	}

	flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ? CAM_REQ_MGR_FLUSH_TYPE_ALL :
		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
	flush_req.req_id = cmd->req_id;

	CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%llu)", flush_req.type, flush_req.req_id);

	switch (ctx->state) {
	case CAM_CTX_ACQUIRED:
	case CAM_CTX_ACTIVATED:
		rc = __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
		break;
	case CAM_CTX_READY:
		rc = __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
		break;
	default:
		CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
		return -EINVAL;
	}

	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
		cam_req_mgr_workq_flush(ctx_isp->workq);

	return rc;
}
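
/*
 * Free the per-request bookkeeping arrays (hw update entries and in/out
 * fence maps) allocated by __cam_isp_ctx_allocate_mem_hw_entries(), then
 * zero the max counts so a stale size can never be trusted after release.
 */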
static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
{
	int i;

	if (ctx->out_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->out_map_entries[i]);
			ctx->out_map_entries[i] = NULL;
		}

		kfree(ctx->out_map_entries);
		ctx->out_map_entries = NULL;
	}

	if (ctx->in_map_entries) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->in_map_entries[i]);
			ctx->in_map_entries[i] = NULL;
		}

		kfree(ctx->in_map_entries);
		ctx->in_map_entries = NULL;
	}

	if (ctx->hw_update_entry) {
		for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
			kfree(ctx->hw_update_entry[i]);
			ctx->hw_update_entry[i] = NULL;
		}

		kfree(ctx->hw_update_entry);
		ctx->hw_update_entry = NULL;
	}

	ctx->max_out_map_entries = 0;
	ctx->max_in_map_entries = 0;
	ctx->max_hw_update_entries = 0;
}
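
/*
 * RELEASE_HW in the top-level state machine: hand the hw context back to
 * the hw manager, reset the per-context soft state, flush anything still
 * on the pending list, free the bookkeeping arrays and drop back to the
 * ACQUIRED state (the device handle itself stays valid).
 */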
static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;
	int i;

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	} else {
		CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
	}

	ctx->last_flush_req = 0;
	ctx_isp->custom_enabled = false;
	ctx_isp->use_frame_header_ts = false;
	ctx_isp->use_default_apply = false;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->support_consumed_addr = false;
	ctx_isp->aeb_enabled = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still keep a sanity check here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_WARN(CAM_ISP, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);

	cam_req_mgr_workq_destroy(&ctx_isp->workq);

	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_release_args rel_arg;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_req_mgr_flush_request flush_req;

	if (cmd && ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "releasing hw");
		__cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
	}

	if (ctx_isp->hw_ctx) {
		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&rel_arg);
		ctx_isp->hw_ctx = NULL;
	}

	cam_common_release_evt_params(ctx->dev_hdl);
	memset(&ctx_isp->evt_inject_params, 0, sizeof(struct cam_hw_inject_evt_param));

	ctx->session_hdl = -1;
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->last_flush_req = 0;
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->hw_acquired = false;
	ctx_isp->init_received = false;
	ctx_isp->offline_context = false;
	ctx_isp->vfps_aux_context = false;
	ctx_isp->rdi_only_context = false;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->v4l2_event_sub_ids = 0;
	ctx_isp->resume_hw_in_flushed = false;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * Ideally, we should never have any active request here.
	 * But we still keep a sanity check here to aid debugging.
	 */
	if (!list_empty(&ctx->active_req_list))
		CAM_ERR(CAM_ISP, "Active list is not empty");

	/* Flush all the pending request list */
	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
	flush_req.link_hdl = ctx->link_hdl;
	flush_req.dev_hdl = ctx->dev_hdl;
	flush_req.req_id = 0;

	CAM_DBG(CAM_ISP, "try to flush pending list");
	spin_lock_bh(&ctx->lock);
	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
	spin_unlock_bh(&ctx->lock);

	__cam_isp_ctx_free_mem_hw_entries(ctx);

	ctx->state = CAM_CTX_AVAILABLE;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
		ctx->ctx_id, ctx->state);

	return rc;
}
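
/*
 * CONFIG_DEV: take a free request object, parse and validate the packet
 * (rejecting requests at or below the last flushed id), let the hw manager
 * prepare the update entries and fence maps, take a ref on every output
 * fence, and finally queue the request: INIT packets go through
 * __cam_isp_ctx_enqueue_init_request(), update packets are added to CRM
 * or, for offline/vfps contexts, enqueued and scheduled directly.
 */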
static int __cam_isp_ctx_config_dev_in_top_state(
	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
	int rc = 0, i;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_packet *packet;
	size_t remain_len = 0;
	struct cam_hw_prepare_update_args cfg = {0};
	struct cam_req_mgr_add_request add_req;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint32_t packet_opcode = 0;

	CAM_DBG(CAM_ISP, "get free request object......");

	/* get free request */
	spin_lock_bh(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_ISP, "No more request obj free");
		return -ENOMEM;
	}

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet);
	if (IS_ERR(packet)) {
		rc = PTR_ERR(packet);
		goto free_req;
	}

	/* Query the packet opcode */
	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
	isp_hw_cmd_args.cmd_data = (void *)packet;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_req;
	}

	packet_opcode = isp_hw_cmd_args.u.packet_op_code;
	if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)) {
		CAM_INFO(CAM_ISP,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EBADR;
		goto free_req;
	} else if ((packet_opcode == CAM_ISP_PACKET_INIT_DEV)
		&& (packet->header.request_id <= ctx->last_flush_req)
		&& ctx->last_flush_req && packet->header.request_id) {
		CAM_WARN(CAM_ISP,
			"last flushed req is %lld, config dev(init) for req %lld",
			ctx->last_flush_req, packet->header.request_id);
		rc = -EBADR;
		goto free_req;
	}

	cfg.packet = packet;
	cfg.remain_len = remain_len;
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.max_hw_update_entries = ctx->max_hw_update_entries;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.max_out_map_entries = ctx->max_out_map_entries;
	cfg.max_in_map_entries = ctx->max_in_map_entries;
	cfg.out_map_entries = req_isp->fence_map_out;
	cfg.in_map_entries = req_isp->fence_map_in;
	cfg.priv = &req_isp->hw_update_data;
	cfg.pf_data = &(req->pf_data);
	cfg.num_out_map_entries = 0;
	cfg.num_in_map_entries = 0;
	memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
		rc = -EFAULT;
		goto free_req;
	}

	req_isp->num_cfg = cfg.num_hw_update_entries;
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;
	req_isp->hw_update_data.packet = packet;
	req->pf_data.packet_handle = cmd->packet_handle;
	req->pf_data.packet_offset = cmd->offset;
	req->pf_data.req = req;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
				req_isp->fence_map_out[i].sync_id);
			goto put_ref;
		}
	}

	CAM_DBG(CAM_ISP,
		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d",
		packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
		req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in);

	req->request_id = packet->header.request_id;
	req->status = 1;

	if (req_isp->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if (ctx->state < CAM_CTX_ACTIVATED) {
			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
			if (rc)
				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
			ctx_isp->init_received = true;

			if ((ctx_isp->vfps_aux_context) && (req->request_id > 0))
				ctx_isp->resume_hw_in_flushed = true;
			else
				ctx_isp->resume_hw_in_flushed = false;
		} else {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
				ctx->state);
		}
	} else {
		if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
			rc = -EINVAL;
			CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
				req->request_id, ctx->state);
			goto put_ref;
		}

		if ((ctx_isp->offline_context) || (ctx_isp->vfps_aux_context)) {
			__cam_isp_ctx_enqueue_request_in_order(ctx, req, true);
		} else if (ctx->ctx_crm_intf->add_req) {
			memset(&add_req, 0, sizeof(add_req));
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl = ctx->dev_hdl;
			add_req.req_id = req->request_id;
			rc = ctx->ctx_crm_intf->add_req(&add_req);
			if (rc) {
				if (rc == -EBADR)
					CAM_INFO(CAM_ISP,
						"Add req failed: req id=%llu, it has been flushed",
						req->request_id);
				else
					CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
						req->request_id);
			} else {
				__cam_isp_ctx_enqueue_request_in_order(
					ctx, req, true);
			}
		} else {
			CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
			rc = -ENODEV;
		}
	}

	if (rc)
		goto put_ref;

	CAM_DBG(CAM_REQ,
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch))
		__cam_isp_ctx_schedule_apply_req(ctx_isp);
	else if (ctx_isp->vfps_aux_context &&
		(req_isp->hw_update_data.packet_opcode_type != CAM_ISP_PACKET_INIT_DEV))
		__cam_isp_ctx_schedule_apply_req(ctx_isp);

	return rc;

put_ref:
	for (--i; i >= 0; i--) {
		if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
				req_isp->fence_map_out[i].sync_id);
	}
free_req:
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	spin_unlock_bh(&ctx->lock);

	return rc;
}
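
/*
 * Size and allocate the per-request arrays. The first acquire op param is
 * taken as the per-hw resource count (falling back to CAM_ISP_CTX_RES_MAX
 * when absent) and SFE sessions add their own count on top; each of the
 * CAM_ISP_CTX_REQ_MAX request slots then gets hw-update, in-fence and
 * out-fence arrays of that size, wired into the free request objects.
 * Any allocation failure is unwound via __cam_isp_ctx_free_mem_hw_entries().
 */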
static int __cam_isp_ctx_allocate_mem_hw_entries(
	struct cam_context *ctx,
	struct cam_hw_acquire_args *param)
{
	int rc = 0, i;
	uint32_t max_res = 0;
	uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
	struct cam_ctx_request *req;
	struct cam_ctx_request *temp_req;
	struct cam_isp_ctx_req *req_isp;

	if (!param->op_params.param_list[0])
		max_res = CAM_ISP_CTX_RES_MAX;
	else {
		max_res = param->op_params.param_list[0];
		if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
			max_res += param->op_params.param_list[1];
			max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
		}
	}

	ctx->max_in_map_entries = max_res;
	ctx->max_out_map_entries = max_res;
	ctx->max_hw_update_entries = max_hw_upd_entries;

	CAM_DBG(CAM_ISP,
		"Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
		max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN));

	ctx->hw_update_entry = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_update_entry *),
		GFP_KERNEL);
	if (!ctx->hw_update_entry) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory",
			ctx->dev_name, ctx->ctx_id);
		return -ENOMEM;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
			sizeof(struct cam_hw_update_entry), GFP_KERNEL);
		if (!ctx->hw_update_entry[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->in_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->in_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->in_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	ctx->out_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
		GFP_KERNEL);
	if (!ctx->out_map_entries) {
		CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->out_map_entries[i] = kcalloc(ctx->max_out_map_entries,
			sizeof(struct cam_hw_fence_map_entry),
			GFP_KERNEL);
		if (!ctx->out_map_entries[i]) {
			CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries: %u",
				ctx->dev_name, ctx->ctx_id, i);
			rc = -ENOMEM;
			goto end;
		}
	}

	list_for_each_entry_safe(req, temp_req,
		&ctx->free_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		req_isp->cfg = ctx->hw_update_entry[req->index];
		req_isp->fence_map_in = ctx->in_map_entries[req->index];
		req_isp->fence_map_out = ctx->out_map_entries[req->index];
	}

	return rc;

end:
	__cam_isp_ctx_free_mem_hw_entries(ctx);

	return rc;
}
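
/*
 * Legacy ACQUIRE_DEV: copy the isp resource descriptors from user space,
 * reserve hardware through the hw manager, pick the substate machine that
 * matches the session type (RDI-only, FS2, offline, or the default PIX
 * path), then create the device handle for the bridge. num_resources ==
 * CAM_API_COMPAT_CONSTANT marks a split acquire where only the handle is
 * created here and the hw is acquired later via ACQUIRE_HW.
 */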
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_hw_acquire_args param;
	struct cam_isp_resource *isp_res = NULL;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
		cmd->session_handle, cmd->num_resources,
		cmd->handle_type, cmd->resource_hdl);

	ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed();

	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
		ctx_isp->split_acquire = true;
		CAM_DBG(CAM_ISP, "Acquire dev handle");
		goto get_dev_handle;
	}

	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP, "Too many resources in the acquire");
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	isp_res = kzalloc(
		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
	if (!isp_res) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy %d resources from user",
		cmd->num_resources);

	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
		sizeof(*isp_res)*cmd->num_resources)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = cmd->num_resources;
	param.acquire_info = (uintptr_t) isp_res;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources; assign the RDI-only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx_isp->split_acquire = false;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	kfree(isp_res);
	isp_res = NULL;

get_dev_handle:
	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags. so false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.ops = ctx->crm_ctx_intf;
	req_hdl_param.priv = ctx;
	req_hdl_param.dev_id = CAM_ISP;

	CAM_DBG(CAM_ISP, "get device handle from bridge");
	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_ISP, "Can not create device handle");
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;
	ctx->state = CAM_CTX_ACQUIRED;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
		cmd->session_handle, cmd->num_resources, ctx->ctx_id);

	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	if (ctx_isp->hw_acquired)
		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(isp_res);
end:
	return rc;
}
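
/*
 * ACQUIRE_HW v1: second half of a split acquire. The variable-size
 * cam_isp_acquire_hw_info blob is copied from user space, the bookkeeping
 * arrays are sized from it, and the hw manager reserves the resources
 * before the session-type specific substate machine is selected, exactly
 * as in the legacy acquire path above.
 */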
static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
	void *args)
{
	int rc = 0;
	int i;
	struct cam_acquire_hw_cmd_v1 *cmd =
		(struct cam_acquire_hw_cmd_v1 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}
	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
		&param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_res;
	}

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}
	/* Assumption: mirrors v2, which latches CAM_IFE_CTX_CONSUME_ADDR_EN
	 * here; the original listing tested CAM_IFE_CTX_FRAME_HEADER_EN,
	 * which looks like a copy-paste slip.
	 */
	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources; assign the RDI-only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
static void cam_req_mgr_process_workq_apply_req_worker(struct work_struct *w)
{
	cam_req_mgr_process_workq(w);
}
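
/*
 * ACQUIRE_HW v2: like v1, but the hw manager is called before the
 * bookkeeping arrays are sized (so SFE resource counts are known), the
 * acquired hw ids/paths are reported back to user space, extra feature
 * flags (custom hw, frame-header timestamps, default apply, consumed
 * address, AEB) are latched from op_flags, and offline/vfps sessions get
 * a dedicated apply workq.
 */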
static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
	void *args)
{
	int rc = 0, i, j;
	struct cam_acquire_hw_cmd_v2 *cmd =
		(struct cam_acquire_hw_cmd_v2 *)args;
	struct cam_hw_acquire_args param;
	struct cam_hw_release_args release;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_ISP, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_ISP,
		"session_hdl 0x%x, hdl type %d, res %lld",
		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

	/* for now we only support user pointer */
	if (cmd->handle_type != 1) {
		CAM_ERR(CAM_ISP, "Only user pointer is supported");
		rc = -EINVAL;
		goto end;
	}

	if (cmd->data_size < sizeof(*acquire_hw_info)) {
		CAM_ERR(CAM_ISP, "data_size is not a valid value");
		rc = -EINVAL;
		goto end;
	}

	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
	if (!acquire_hw_info) {
		rc = -ENOMEM;
		goto end;
	}

	CAM_DBG(CAM_ISP, "start copy resources from user");

	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
		cmd->data_size)) {
		rc = -EFAULT;
		goto free_res;
	}

	memset(&param, 0, sizeof(param));
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.sec_pf_evt_cb = cam_context_dump_pf_info;
	param.num_acq = CAM_API_COMPAT_CONSTANT;
	param.acquire_info_size = cmd->data_size;
	param.acquire_info = (uint64_t) acquire_hw_info;
	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_ISP, "Acquire device failed");
		goto free_res;
	}

	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
	if (rc) {
		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
			ctx->ctx_id);
		goto free_hw;
	}

	/*
	 * Set feature flags if applicable;
	 * custom hw is supported only on v2
	 */
	ctx_isp->custom_enabled =
		(param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
	ctx_isp->use_frame_header_ts =
		(param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
	ctx_isp->use_default_apply =
		(param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
	ctx_isp->support_consumed_addr =
		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
	ctx_isp->aeb_enabled =
		(param.op_flags & CAM_IFE_CTX_AEB_EN);

	/* Query whether the context has RDI-only resources */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "HW command failed");
		goto free_hw;
	}

	if (param.valid_acquired_hw) {
		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			cmd->hw_info.acquired_hw_id[i] =
				param.acquired_hw_id[i];

		for (i = 0; i < CAM_MAX_ACQ_RES; i++)
			for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
				cmd->hw_info.acquired_hw_path[i][j] =
					param.acquired_hw_path[i][j];
	}
	cmd->hw_info.valid_acquired_hw = param.valid_acquired_hw;

	if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
		/*
		 * This context has RDI-only resources; assign the RDI-only
		 * state machine
		 */
		CAM_DBG(CAM_ISP, "RDI only session Context");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
		ctx_isp->rdi_only_context = true;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
		CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
		ctx_isp->offline_context = true;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	if (ctx_isp->offline_context || ctx_isp->vfps_aux_context) {
		rc = cam_req_mgr_workq_create("ife_apply_req", 20,
			&ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
			cam_req_mgr_process_workq_apply_req_worker);
		if (rc)
			CAM_ERR(CAM_ISP,
				"Failed to create workq for IFE rc:%d offline: %s vfps: %s",
				rc, CAM_BOOL_TO_YESNO(ctx_isp->offline_context),
				CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context));
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
	ctx_isp->hw_acquired = true;
	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	trace_cam_context_state("ISP", ctx);
	CAM_DBG(CAM_ISP,
		"Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
	kfree(acquire_hw_info);
	return rc;

free_hw:
	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx_isp->hw_ctx = NULL;
	ctx_isp->hw_acquired = false;
free_res:
	kfree(acquire_hw_info);
end:
	return rc;
}
static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
	void *args)
{
	int rc = -EINVAL;
	uint32_t api_version;

	if (!ctx || !args) {
		CAM_ERR(CAM_ISP, "Invalid input pointer");
		return rc;
	}

	api_version = *((uint32_t *)args);
	if (api_version == 1)
		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
	else if (api_version == 2)
		rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
	else
		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

	return rc;
}
static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		return -EINVAL;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

	if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
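
/*
 * CONFIG_DEV while FLUSHED: accept the packet as usual, and if an INIT
 * packet was already received, resume the hw and restart the device so
 * streaming recovers from the flush; vfps aux contexts defer the resume
 * until the first valid request arrives.
 */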
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_start_stop_dev_cmd start_cmd;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!ctx_isp->hw_acquired) {
		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
		rc = -EINVAL;
		goto end;
	}

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
	if (rc)
		goto end;

	if (!ctx_isp->init_received) {
		CAM_WARN(CAM_ISP,
			"Received update packet in flushed state, skip start");
		goto end;
	}

	CAM_DBG(CAM_ISP, "vfps_ctx:%s resume_hw_in_flushed:%d ctx:%d",
		CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context),
		ctx_isp->resume_hw_in_flushed,
		ctx->ctx_id);

	if (ctx_isp->vfps_aux_context) {
		/* Resume the HW only when we get the first valid req */
		if (!ctx_isp->resume_hw_in_flushed)
			goto end;
		else
			ctx_isp->resume_hw_in_flushed = false;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
		goto end;
	}

	start_cmd.dev_handle = cmd->dev_handle;
	start_cmd.session_handle = cmd->session_handle;
	rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to re-start HW after flush rc: %d", rc);
	else
		CAM_INFO(CAM_ISP,
			"Received init after flush. Re-start HW complete in ctx:%d",
			ctx->ctx_id);

end:
	CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
		ctx_isp->substate_activated);
	return rc;
}
static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (!link) {
		CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u",
			link, ctx->ctx_id);
		return -EINVAL;
	}

	if (!link->crm_cb) {
		CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u",
			link->crm_cb, ctx->ctx_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Enter.........");

	ctx->link_hdl = link->link_hdl;
	ctx->ctx_crm_intf = link->crm_cb;
	ctx_isp->subscribe_event =
		CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
	ctx_isp->trigger_id = link->trigger_id;

	/* change state only if we had the init config */
	if (ctx_isp->init_received) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);

	return rc;
}

static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx_isp->trigger_id = -1;

	return rc;
}
static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
	struct cam_req_mgr_device_info *dev_info)
{
	int rc = 0;

	dev_info->dev_hdl = ctx->dev_hdl;
	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
	dev_info->p_delay = 1;
	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
	dev_info->trigger_on = true;

	return rc;
}

static inline void __cam_isp_context_reset_ctx_params(
	struct cam_isp_context *ctx_isp)
{
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	ctx_isp->frame_id = 0;
	ctx_isp->sof_timestamp_val = 0;
	ctx_isp->boot_timestamp = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->recovery_req_id = 0;
	ctx_isp->aeb_error_cnt = 0;
}
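
/*
 * START_DEV: build cam_isp_start_args from the first pending request
 * (which must exist and carry the INIT configuration), reset the soft
 * state, pick the initial substate, and move the request to the proper
 * list *before* hw_start is called, since CSID TPG can raise SOF/RUP IRQs
 * before hw_start returns. On hw_start failure the request is put back on
 * the pending list and the context returns to READY.
 */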
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	int i;
	struct cam_isp_start_args start_isp;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if (cmd->session_handle != ctx->session_hdl ||
		cmd->dev_handle != ctx->dev_hdl) {
		rc = -EPERM;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* should never happen */
		CAM_ERR(CAM_ISP, "Start device with empty configuration");
		rc = -EFAULT;
		goto end;
	} else {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	}
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	if (!ctx_isp->hw_ctx) {
		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
		rc = -EFAULT;
		goto end;
	}

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.is_internal_start = false;

	ctx_isp->last_applied_req_id = req->request_id;

	if (ctx->state == CAM_CTX_FLUSHED)
		start_isp.start_only = true;
	else
		start_isp.start_only = false;

	__cam_isp_context_reset_ctx_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED :
		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
		CAM_ISP_CTX_ACTIVATED_SOF;

	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	/*
	 * In case of CSID TPG we might receive SOF and RUP IRQs
	 * before hw_mgr_intf->hw_start has returned. So move
	 * req out of pending list before hw_start and add it
	 * back to pending list if hw_start fails.
	 */
	list_del_init(&req->list);

	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->free_req_list);
		atomic_set(&ctx_isp->rxd_epoch, 1);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to free list(cnt: %d) offline ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to wait list(cnt: %d) ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx_isp->offline_context);
	}

	/*
	 * Only place where state is changed before calling the hw: the
	 * hardware tasklet runs at higher priority, so irq handling can
	 * begin before hw_start has returned
	 */
	ctx->state = CAM_CTX_ACTIVATED;
	trace_cam_context_state("ISP", ctx);
	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		/* HW failure. user need to clean up the resource */
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		if ((rc == -ETIMEDOUT) &&
			(isp_ctx_debug.enable_cdm_cmd_buff_dump))
			rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);

		trace_cam_context_state("ISP", ctx);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		goto end;
	}
	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
	return rc;
}
static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	ctx->link_hdl = -1;
	ctx->ctx_crm_intf = NULL;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}
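
/*
 * Core stop sequence: first mask further hw events by forcing the HALT
 * substate, then stop the hardware, notify CRM, and cancel every request
 * still on the pending, wait and active lists by signaling their output
 * fences with CAM_SYNC_STATE_SIGNALED_CANCEL before resetting the soft
 * state. When called without a stop command the link is also torn down.
 */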
  6279. static int __cam_isp_ctx_stop_dev_in_activated_unlock(
  6280. struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
  6281. {
  6282. int rc = 0;
  6283. uint32_t i;
  6284. struct cam_hw_stop_args stop;
  6285. struct cam_ctx_request *req;
  6286. struct cam_isp_ctx_req *req_isp;
  6287. struct cam_isp_context *ctx_isp =
  6288. (struct cam_isp_context *) ctx->ctx_priv;
  6289. struct cam_isp_stop_args stop_isp;
  6290. /* Mask off all the incoming hardware events */
  6291. spin_lock_bh(&ctx->lock);
  6292. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
  6293. spin_unlock_bh(&ctx->lock);
  6294. /* stop hw first */
  6295. if (ctx_isp->hw_ctx) {
  6296. stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
  6297. stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
  6298. stop_isp.stop_only = false;
  6299. stop_isp.is_internal_stop = false;
  6300. stop.args = (void *) &stop_isp;
  6301. ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
  6302. &stop);
  6303. }
  6304. CAM_DBG(CAM_ISP, "next Substate[%s]",
  6305. __cam_isp_ctx_substate_val_to_type(
  6306. ctx_isp->substate_activated));
  6307. if (ctx->ctx_crm_intf &&
  6308. ctx->ctx_crm_intf->notify_stop) {
  6309. struct cam_req_mgr_notify_stop notify;
  6310. notify.link_hdl = ctx->link_hdl;
  6311. CAM_DBG(CAM_ISP,
  6312. "Notify CRM about device stop ctx %u link 0x%x",
  6313. ctx->ctx_id, ctx->link_hdl);
  6314. ctx->ctx_crm_intf->notify_stop(&notify);
  6315. } else if (!ctx_isp->offline_context)
  6316. CAM_ERR(CAM_ISP, "cb not present");
  6317. while (!list_empty(&ctx->pending_req_list)) {
  6318. req = list_first_entry(&ctx->pending_req_list,
  6319. struct cam_ctx_request, list);
  6320. list_del_init(&req->list);
  6321. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  6322. CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
  6323. req_isp->num_fence_map_out);
  6324. for (i = 0; i < req_isp->num_fence_map_out; i++)
  6325. if (req_isp->fence_map_out[i].sync_id != -1) {
  6326. cam_sync_signal(
  6327. req_isp->fence_map_out[i].sync_id,
  6328. CAM_SYNC_STATE_SIGNALED_CANCEL,
  6329. CAM_SYNC_ISP_EVENT_HW_STOP);
  6330. }
  6331. list_add_tail(&req->list, &ctx->free_req_list);
  6332. }
  6333. while (!list_empty(&ctx->wait_req_list)) {
  6334. req = list_first_entry(&ctx->wait_req_list,
  6335. struct cam_ctx_request, list);
  6336. list_del_init(&req->list);
  6337. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  6338. CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
  6339. req_isp->num_fence_map_out);
  6340. for (i = 0; i < req_isp->num_fence_map_out; i++)
  6341. if (req_isp->fence_map_out[i].sync_id != -1) {
  6342. cam_sync_signal(
  6343. req_isp->fence_map_out[i].sync_id,
  6344. CAM_SYNC_STATE_SIGNALED_CANCEL,
  6345. CAM_SYNC_ISP_EVENT_HW_STOP);
  6346. }
  6347. list_add_tail(&req->list, &ctx->free_req_list);
  6348. }
  6349. while (!list_empty(&ctx->active_req_list)) {
  6350. req = list_first_entry(&ctx->active_req_list,
  6351. struct cam_ctx_request, list);
  6352. list_del_init(&req->list);
  6353. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  6354. CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
  6355. req_isp->num_fence_map_out);
  6356. for (i = 0; i < req_isp->num_fence_map_out; i++)
  6357. if (req_isp->fence_map_out[i].sync_id != -1) {
  6358. cam_sync_signal(
  6359. req_isp->fence_map_out[i].sync_id,
  6360. CAM_SYNC_STATE_SIGNALED_CANCEL,
  6361. CAM_SYNC_ISP_EVENT_HW_STOP);
  6362. }
  6363. list_add_tail(&req->list, &ctx->free_req_list);
  6364. }

	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->reported_frame_id = 0;
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->internal_recovery_set, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx_isp->event_record_head[i], -1);

	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
		ctx->state, ctx->ctx_id);

	if (!stop_cmd) {
		rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
		if (rc)
			CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
	}

	return rc;
}
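
/*
 * Stop the device from the activated state: perform the actual stop via
 * __cam_isp_ctx_stop_dev_in_activated_unlock(), clear init_received and
 * drop the context back to CAM_CTX_ACQUIRED.
 */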
static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
	ctx_isp->init_received = false;
	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ISP", ctx);

	return rc;
}
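
/*
 * Release from activated: stop the device first (best effort, failures are
 * only logged), then fall through to the top-state release path.
 */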
static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);

	return rc;
}
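
/* Same stop-then-release sequence as above, but releasing only the HW. */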
static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
	void *cmd)
{
	int rc = 0;

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);

	return rc;
}
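
/*
 * Link pause/resume helpers: send CAM_ISP_HW_MGR_CMD_PAUSE_HW /
 * CAM_ISP_HW_MGR_CMD_RESUME_HW to the HW manager as internal hw_cmd calls.
 */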
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}

static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
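
/*
 * On a CRM SOF-freeze event, ask the HW manager to enable SOF IRQ debug
 * (CAM_ISP_HW_MGR_CMD_SOF_DEBUG with sof_irq_enable = 1).
 */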
static int __cam_isp_ctx_handle_sof_freeze_evt(
	struct cam_context *ctx)
{
	int rc = 0;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;

	hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
	isp_hw_cmd_args.u.sof_irq_enable = 1;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);

	return rc;
}
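
/*
 * Internal halt/reset/resume recovery. Validates that the context is
 * activated, has no requests on the active list and has at least one
 * pending request, then stops the HW (stop_only + internal stop) and,
 * unless skip_resume is set, resumes the HW and re-applies the first
 * pending request as an internal start with CAM_CONFIG_REAPPLY_IQ.
 */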
static int __cam_isp_ctx_reset_and_recover(
	bool skip_resume, struct cam_context *ctx)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;
	struct cam_isp_stop_args stop_isp;
	struct cam_hw_stop_args stop_args;
	struct cam_isp_start_args start_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;

	spin_lock_bh(&ctx->lock);
	if (ctx_isp->active_req_cnt) {
		spin_unlock_bh(&ctx->lock);
		CAM_WARN(CAM_ISP,
			"Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done",
			ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		goto end;
	}

	if (ctx->state != CAM_CTX_ACTIVATED) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld",
			ctx->state, ctx->ctx_id,
			ctx->link_hdl, ctx_isp->recovery_req_id);
		rc = -EINVAL;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		/* Cannot start with no request */
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	if (!ctx_isp->hw_ctx) {
		spin_unlock_bh(&ctx->lock);
		CAM_ERR(CAM_ISP,
			"Invalid hw context pointer ctx: %u on link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	/* Block all events till HW is resumed */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;

	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	spin_unlock_bh(&ctx->lock);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_INFO(CAM_ISP,
		"Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x",
		req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl);

	stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
	stop_isp.stop_only = true;
	stop_isp.is_internal_stop = true;
	stop_args.args = (void *)&stop_isp;
	rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
		&stop_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u",
			rc, ctx->ctx_id);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	/* The API also allows streaming off without resuming, e.g. on fatal errors */
	if (skip_resume) {
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		CAM_INFO(CAM_ISP,
			"Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x",
			ctx->ctx_id, ctx_isp->last_applied_req_id,
			ctx_isp->recovery_req_id, ctx->link_hdl);
		goto end;
	}

	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cmd_args);
	if (rc) {
		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u", rc, ctx->ctx_id);
		goto end;
	}

	CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x",
		ctx->ctx_id, ctx->link_hdl);

	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
	start_isp.hw_config.request_id = req->request_id;
	start_isp.hw_config.hw_update_entries = req_isp->cfg;
	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
	start_isp.hw_config.priv = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ;
	start_isp.hw_config.cdm_reset_before_apply = false;
	start_isp.start_only = true;
	start_isp.is_internal_start = true;

	__cam_isp_context_reset_internal_recovery_params(ctx_isp);

	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;

	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
		&start_isp);
	if (rc) {
		CAM_ERR(CAM_ISP, "Start HW failed");
		ctx->state = CAM_CTX_READY;
		goto end;
	}

	/* IQ applied for this request, on next trigger skip IQ cfg */
	req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO;

	/* Notify userland that KMD has done internal recovery */
	__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY,
		0, req->request_id, ctx);

	CAM_INFO(CAM_ISP, "Internal Start HW success ctx %u on link: 0x%x for req: %llu",
		ctx->ctx_id, ctx->link_hdl, req->request_id);

end:
	return rc;
}
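
/*
 * Attempt internal recovery when bubble handling appears stalled. Returns
 * true only if recovery was actually triggered and succeeded; any
 * validation failure (recovery disabled via the debugfs mask, no bubble in
 * progress, errored request not yet applied, or reapply validation
 * failure) returns false so the caller can fall back to a register dump.
 */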
static bool __cam_isp_ctx_try_internal_recovery_for_bubble(
	int64_t error_req_id, struct cam_context *ctx)
{
	int rc;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	if (isp_ctx_debug.disable_internal_recovery_mask &
		CAM_ISP_CTX_DISABLE_RECOVERY_BUBBLE)
		return false;

	/* Perform recovery if bubble recovery is stalled */
	if (!atomic_read(&ctx_isp->process_bubble))
		return false;

	/* Validate if errored request has been applied */
	if (ctx_isp->last_applied_req_id < error_req_id) {
		CAM_WARN(CAM_ISP,
			"Skip trying for internal recovery last applied: %lld error_req: %lld for ctx: %u on link: 0x%x",
			ctx_isp->last_applied_req_id, error_req_id,
			ctx->ctx_id, ctx->link_hdl);
		return false;
	}

	if (__cam_isp_ctx_validate_for_req_reapply_util(ctx_isp)) {
		CAM_WARN(CAM_ISP,
			"Internal recovery not possible for ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		return false;
	}

	/* Trigger reset and recover */
	atomic_set(&ctx_isp->internal_recovery_set, 1);
	rc = __cam_isp_ctx_reset_and_recover(false, ctx);
	if (rc) {
		CAM_WARN(CAM_ISP,
			"Internal recovery failed in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
			ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);
		atomic_set(&ctx_isp->internal_recovery_set, 0);
		goto error;
	}

	CAM_DBG(CAM_ISP,
		"Internal recovery done in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]",
		ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id);

	return true;

error:
	return false;
}
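
/*
 * CRM link event dispatcher. PAUSE/RESUME/SOF_FREEZE map to the helpers
 * above; on STALLED we first try internal bubble recovery (when CRM asks
 * for it) and fall back to a register dump, reporting back through
 * link_evt_data->try_for_recovery whether recovery was performed.
 */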
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *link_evt_data)
{
	int rc = 0;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if ((ctx->state == CAM_CTX_ACQUIRED) &&
		(link_evt_data->evt_type != CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES)) {
		CAM_WARN(CAM_ISP,
			"Unexpected evt:%d in acquired state",
			link_evt_data->evt_type);
		return -EINVAL;
	}

	switch (link_evt_data->evt_type) {
	case CAM_REQ_MGR_LINK_EVT_ERR:
	case CAM_REQ_MGR_LINK_EVT_EOF:
		/* No handling */
		break;
	case CAM_REQ_MGR_LINK_EVT_PAUSE:
		rc = __cam_isp_ctx_link_pause(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME:
		rc = __cam_isp_ctx_link_resume(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
		rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx);
		break;
	case CAM_REQ_MGR_LINK_EVT_STALLED: {
		bool internal_recovery_skipped = false;

		if (ctx->state == CAM_CTX_ACTIVATED) {
			if (link_evt_data->try_for_recovery)
				internal_recovery_skipped =
					__cam_isp_ctx_try_internal_recovery_for_bubble(
						link_evt_data->req_id, ctx);

			if (!internal_recovery_skipped)
				rc = __cam_isp_ctx_trigger_reg_dump(
					CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx);
		}
		link_evt_data->try_for_recovery = internal_recovery_skipped;
	}
		break;
	case CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES:
		if (link_evt_data->u.properties_mask &
			CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF)
			ctx_isp->vfps_aux_context = true;
		else
			ctx_isp->vfps_aux_context = false;
		CAM_DBG(CAM_ISP, "vfps_aux_context:%s on ctx: %u",
			CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), ctx->ctx_id);
		break;
	default:
		CAM_WARN(CAM_ISP,
			"Unsupported event type: 0x%x on ctx: %u",
			link_evt_data->evt_type, ctx->ctx_id);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = 0;

	CAM_WARN(CAM_ISP,
		"Received unlink in activated state. It's unexpected");

	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
	if (rc)
		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
	if (rc)
		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);

	return rc;
}
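
/*
 * CRM apply entry point: route the apply to the currently activated
 * substate's apply_req handler, if one exists.
 */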
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl);
	CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), apply->request_id);
	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.apply_req) {
		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated Substate[%s]",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply failed in active Substate[%s] rc %d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), rc);

	return rc;
}
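
/*
 * Apply "default settings" (frame skip) at SOF. This also serves as the
 * trigger point for deferred internal recovery: when internal_recovery_set
 * is pending, kick off reset-and-recover instead of a frame skip.
 */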
static int __cam_isp_ctx_apply_default_settings(
	struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_ops *ctx_ops = NULL;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	if ((!ctx_isp->use_default_apply) && !(atomic_read(&ctx_isp->internal_recovery_set)))
		return 0;

	if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
		CAM_WARN(CAM_ISP,
			"Trigger: %u not subscribed for: %u",
			apply->trigger_point, ctx_isp->subscribe_event);
		return 0;
	}

	/* Allow apply default settings for IFE only at SOF */
	if (apply->trigger_point != CAM_TRIGGER_POINT_SOF)
		return 0;

	if (atomic_read(&ctx_isp->internal_recovery_set))
		return __cam_isp_ctx_reset_and_recover(false, ctx);

	CAM_DBG(CAM_ISP,
		"Enter: apply req in Substate:%d request_id:%lld ctx:%u on link:0x%x",
		ctx_isp->substate_activated, apply->request_id,
		ctx->ctx_id, ctx->link_hdl);

	ctx_ops = &ctx_isp->substate_machine[
		ctx_isp->substate_activated];
	if (ctx_ops->crm_ops.notify_frame_skip) {
		rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
	} else {
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"No handle function in activated substate %d",
			ctx_isp->substate_activated);
		rc = -EFAULT;
	}

	if (rc)
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Apply default failed in active substate %d rc %d",
			ctx_isp->substate_activated, rc);

	return rc;
}
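
/*
 * Top-level IRQ handler while activated: dispatch the event to the
 * current substate's IRQ ops table under the context spinlock.
 */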
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = 0;
	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
	struct cam_context *ctx = (struct cam_context *)context;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *)ctx->ctx_priv;

	spin_lock(&ctx->lock);
	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
	CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d, ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), evt_id,
		ctx->ctx_id);
	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
	if (irq_ops->irq_ops[evt_id]) {
		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
	} else {
		CAM_DBG(CAM_ISP,
			"No handle function for Substate[%s], evt id %d, ctx:%d",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated), evt_id,
			ctx->ctx_id);
		if (isp_ctx_debug.enable_state_monitor_dump)
			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
	}

	CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx:%d",
		ctx->state, __cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated), ctx->ctx_id);

	spin_unlock(&ctx->lock);
	return rc;
}
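
/*
 * Validate an injected event notification before it is latched: only a
 * known set of error types is accepted, and PF injection is permitted
 * only when the SMMU context bank has non-fatal faults enabled.
 */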
static int cam_isp_context_validate_event_notify_injection(struct cam_context *ctx,
	struct cam_hw_inject_evt_param *evt_params)
{
	int rc = 0;
	uint32_t evt_type;
	uint64_t req_id;

	req_id = evt_params->req_id;
	evt_type = evt_params->u.evt_notify.evt_notify_type;

	switch (evt_type) {
	case V4L_EVENT_CAM_REQ_MGR_ERROR: {
		struct cam_hw_inject_err_evt_param *err_evt_params =
			&evt_params->u.evt_notify.u.err_evt_params;

		switch (err_evt_params->err_type) {
		case CAM_REQ_MGR_ERROR_TYPE_RECOVERY:
		case CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE:
		case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY:
		case CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY:
			break;
		default:
			CAM_ERR(CAM_ISP,
				"Invalid error type: %u for error event injection err code: %u req id: %llu ctx id: %u dev hdl: %d",
				err_evt_params->err_type, err_evt_params->err_code,
				req_id, ctx->ctx_id, ctx->dev_hdl);
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject ERR evt: err code: %u err type: %u req id: %llu ctx id: %u dev hdl: %d",
			err_evt_params->err_code, err_evt_params->err_type,
			req_id, ctx->ctx_id, ctx->dev_hdl);
		break;
	}
	case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: {
		struct cam_hw_inject_pf_evt_param *pf_evt_params =
			&evt_params->u.evt_notify.u.pf_evt_params;
		bool non_fatal_en;

		rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Failed to query whether device's cb has non-fatal faults enabled rc:%d",
				rc);
			return rc;
		}

		if (!non_fatal_en) {
			CAM_ERR(CAM_ISP,
				"Failed to inject page fault event notification. Page fault is fatal for ISP");
			return -EINVAL;
		}

		CAM_INFO(CAM_ISP,
			"Inject PF evt: req_id: %llu ctx id: %u dev hdl: %d ctx found: %hhu",
			req_id, ctx->ctx_id, ctx->dev_hdl, pf_evt_params->ctx_found);
		break;
	}
	default:
		CAM_ERR(CAM_ISP, "Event notification type not supported: %u", evt_type);
		rc = -EINVAL;
	}

	return rc;
}
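
/*
 * Event injection entry: validate the request, then latch it into
 * ctx_isp->evt_inject_params (is_valid = true), presumably to be consumed
 * on a later event-processing path.
 */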
static int cam_isp_context_inject_evt(void *context, void *evt_args)
{
	struct cam_context *ctx = context;
	struct cam_isp_context *ctx_isp = NULL;
	struct cam_hw_inject_evt_param *evt_params = evt_args;
	int rc = 0;

	if (!ctx || !evt_args) {
		CAM_ERR(CAM_ISP,
			"Invalid params ctx %s event args %s",
			CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args));
		return -EINVAL;
	}

	ctx_isp = ctx->ctx_priv;

	if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE) {
		rc = cam_isp_context_validate_event_notify_injection(ctx, evt_params);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Event notification injection failed validation rc: %d", rc);
			return rc;
		}
	} else {
		CAM_ERR(CAM_ISP, "Buffer done err injection %u not supported by ISP",
			evt_params->inject_id);
		return -EINVAL;
	}

	memcpy(&ctx_isp->evt_inject_params, evt_params,
		sizeof(struct cam_hw_inject_evt_param));

	ctx_isp->evt_inject_params.is_valid = true;

	return rc;
}

/* top state machine */
static struct cam_ctx_ops
	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.link = __cam_isp_ctx_link_in_acquired,
			.unlink = __cam_isp_ctx_unlink_in_acquired,
			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.start_dev = __cam_isp_ctx_start_dev_in_ready,
			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.flush_req = __cam_isp_ctx_flush_req_in_ready,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Flushed */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_ready,
			.process_evt = __cam_isp_ctx_process_evt,
			.flush_req = __cam_isp_ctx_flush_req_in_flushed_state,
		},
		.irq_ops = NULL,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
	/* Activated */
	{
		.ioctl_ops = {
			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
			.release_dev = __cam_isp_ctx_release_dev_in_activated,
			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
			.release_hw = __cam_isp_ctx_release_hw_in_activated,
		},
		.crm_ops = {
			.unlink = __cam_isp_ctx_unlink_in_activated,
			.apply_req = __cam_isp_ctx_apply_req,
			.notify_frame_skip =
				__cam_isp_ctx_apply_default_settings,
			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
			.process_evt = __cam_isp_ctx_process_evt,
			.dump_req = __cam_isp_ctx_dump_in_top_state,
		},
		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
		.pagefault_ops = cam_isp_context_dump_requests,
		.dumpinfo_ops = cam_isp_context_info_dump,
		.recovery_ops = cam_isp_context_hw_recovery,
		.evt_inject_ops = cam_isp_context_inject_evt,
	},
};
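
/*
 * HW recovery entry: forwarded to the HW manager's hw_recovery op when
 * one is registered.
 */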
static int cam_isp_context_hw_recovery(void *priv, void *data)
{
	struct cam_context *ctx = priv;
	int rc = -EPERM;

	if (ctx->hw_mgr_intf->hw_recovery)
		rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data);
	else
		CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery");

	return rc;
}
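
/*
 * Walk one request list and dump PF info for each request until the
 * faulted buffer is located; *found reports whether it was.
 */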
static void cam_isp_context_find_faulted_context(struct cam_context *ctx,
	struct list_head *req_list, struct cam_hw_dump_pf_args *pf_args, bool *found)
{
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	int rc;

	*found = false;
	list_for_each_entry_safe(req, req_temp, req_list, list) {
		CAM_INFO(CAM_ISP, "List req_id: %llu ctx id: %u",
			req->request_id, ctx->ctx_id);
		rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");
		/*
		 * Even if the faulted ctx was already identified, keep
		 * iterating until the faulted buffer itself is found
		 */
		if (pf_args->pf_context_info.mem_type != CAM_FAULT_BUF_NOT_FOUND) {
			*found = true;
			break;
		}
	}
}
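
/*
 * Page-fault handler: search the active, wait and pending request lists
 * for the faulted buffer, log the faulted resource if one was identified,
 * and notify UMD when the faulted context was found (or notification is
 * forced).
 */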
static int cam_isp_context_dump_requests(void *data, void *args)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_isp_context *ctx_isp;
	struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args;
	int rc = 0;
	bool found;

	if (!ctx || !pf_args) {
		CAM_ERR(CAM_ISP, "Invalid ctx %pK or pf args %pK",
			ctx, pf_args);
		return -EINVAL;
	}

	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid isp ctx");
		return -EINVAL;
	}

	if (pf_args->handle_sec_pf)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over active list for isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->active_req_list,
		pf_args, &found);
	if (found)
		goto end;

	CAM_INFO(CAM_ISP,
		"Iterating over waiting list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->wait_req_list,
		pf_args, &found);
	if (found)
		goto end;

	/*
	 * In certain scenarios we observe both overflow and SMMU pagefault
	 * for a particular request. If overflow is handled before the page
	 * fault, we also need to traverse the pending request list: when
	 * bubble recovery is enabled on any request, that request and all
	 * subsequent requests are moved to the pending list while handling
	 * the overflow error.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	cam_isp_context_find_faulted_context(ctx, &ctx->pending_req_list,
		pf_args, &found);

end:
	if (pf_args->pf_context_info.resource_type) {
		ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
		CAM_INFO(CAM_ISP,
			"Page fault on resource:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
			__cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type,
			pf_args->pf_context_info.resource_type),
			pf_args->pf_context_info.resource_type, ctx->ctx_id, ctx_isp->frame_id,
			ctx_isp->reported_req_id, ctx_isp->last_applied_req_id);
	}

	/*
	 * Notify UMD of the PF if the fault was found on this context, or
	 * if notification is forced even though no faulted context was found
	 */
	if (pf_args->pf_context_info.ctx_found ||
		pf_args->pf_context_info.force_send_pf_evt)
		rc = cam_context_send_pf_evt(ctx, pf_args);
	if (rc)
		CAM_ERR(CAM_ISP,
			"Failed to notify PF event to userspace rc: %d", rc);

	return rc;
}
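
/*
 * Create the "isp_ctx" debugfs directory and its knobs; a no-op when
 * debugfs is unavailable.
 */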
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("isp_ctx", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		return rc;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
	debugfs_create_u32("disable_internal_recovery_mask", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery_mask);

	return 0;
}
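
/*
 * Initialize an ISP context: zero the structure, wire up the activated
 * substate machines, cross-link req_base[i] and req_isp[i], register with
 * the camera context core and install the top state machine. Also resets
 * the state/event monitor heads and registers debugfs on first use.
 */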
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -1;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();

err:
	return rc;
}
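
/*
 * De-initialize an ISP context; warns if it is torn down while the
 * activated substate is not back at SOF.
 */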
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}